a61150b7192393d96eeff5cb2dc97e1efae2102f30795949216bfe60edc1502e291f7a8f1f274c53254f894df2be18d1f7708823a2eaa720d0f0997dde6cae3d8d969efdcdc43635ba76299306de975e46dd3b3e1977c13fd67a2055467fb9c411b78ad39bcd16c7c5c5dedc5665d9983dc7c008d087e1fae91300b01fbdc29ac84c88a4f5a4e5e82aa231c705c9f3f3b75eafd952ea62ea8d0b9d641106f101baf780a4d76f8229456f335a0683ff23682e012546d9443dacba509357bcd90499f7760687e9385ea4d9e0e70e647aa95043bcbb24634b1ada171ac441335b766efe5a9b9486434916ff4935fd906d7add8b065261fa4231f4a1d5ecbf1681fcc22cf0223635e17c0708b7fd74cfd68c916573438a96cd2528b4988dbb8c2978c0c8d8af402e544f52348569793d45a020adc04156372841026124ec0a352068d93a90b738bc7db756c9708f592b0d29656c3c923cfaeb4b5d62fab18f89e360c4e967b10d7019f61a249db244f44b7db4fcced2bb1f7d087a2c8e62be0d83d4f5d3abc1cacaf18b04b54fd2fcc528c02e4a604bded464970680c028335f487c450b2103ab7723893e26b178aa43b2e50cf79dc25cbb38491cdda2501659c92fc7a717c4e177b82a19753e1fa2ea02adf3045692e2bade95bd9e710935c5d9cca0ae80f258d39ca91a6d2b3ac954c56bc18f5c54d9644c1d2c55fc43520d744ac4f5e19d490d02fcc7be03b9b8f5c74f65958e3d0c959a00ae9559ba2b294cd9f773182f83aff53f59a4ab6ad714fd898ff1bed8ab5fa3b6735ba25ce4c14d91124bcd522394e1f8c1413daf587a40f1a51aef304df8f17f87c4cdd96ca5aab43ab52456b5cfd45a3a522d287a0ea7dd42d419982a66ba3b6118848a739423749c2e8bedfa0873a25704798020d6df7b457df64eab71ebc27b9e332e3faa03b557cc482e0f140a1f0e3013c606cabd8f5a804a3b7b6db6fa035e7bf7f86308c39d96cb39907382fd0efab9f8b5f0f207f788afe74bc372a23d1a255ddd6584caf97ca94b37c8a30938ee6677d329b8902a4f88d2ddbbbe2ab4b7d13d40ba557f62413333c45c8d918d60af02c75231dd78c51a27768d4baeb618525b7691fa6047dc9b152644f33ed98f7c671fa6d39c1db32e3c92cecad0567469ed9d328a6b87e98c535eeea8bab6bd91a0ed523b2b1d189d2f4d65c6d59a972dfdc5d976031d963d26652f74f9873fc142a842323c19adf9aeed83f4fc0ed3e0e9b4a48667a51a8459c67fb60c6fe782fe"}, @NL80211_ATTR_TESTDATA={0x60, 0x45, "70e224224a75037cf083b5403fb8b6d6342e0a2929418cec2d8d594dd36822945f8428fb60678209e1a34fea2b8279c13158ab9463d11ea110c0ec43e1bdbf6b6987590d7bfc299d5c770639c5e8daa24979b23d4e77578dec212305"}, @NL80211_ATTR_TESTDATA={0x8b, 0x45, "209c60aa89f8169ad61ca6fe9c4b53334a0145fc5066307b81f3a0627600038feffda5a96e2fb0c55c9082f884908afc9f2fb86423f537f445783fed659d6f7ed005d9b0dd4a98b12df872744b9da687102f78e43d81af3241629d4cfbe33e66b7ab0bd50fb300e4ae629c0021ffb6e63d4f628d949e50e5f28d29dc99acd349bb002f228ee1f9"}, @NL80211_ATTR_TESTDATA={0xb3, 0x45, "32dc7c5ee3c24ea1ba3db7fdb22d155964f32fb869362968124467bf154a478eefaf9376051b0bb6412c48fef632e4d3a5068226fca8b00559c59a3304010e1e46ea1553fc559bb9af315cb1f6c86d5793595cbf65cfc5eaa32b845410b87deb4b59638fccb0d301a31d7d4ee6b50a66d0f0f5eb9dffca168bbb2bcc27bb7d7f5d88d79b2abc6eda9dbe1d6494ab007fe786cfa101fb485bcc5e28df8ab63ef94239940371fbb4a4282866b54f9779"}]}, 0x14ac}, 0x1, 0x0, 0x0, 0x40081}, 0x20000080) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) [ 1961.902402][T27705] bond956: entered promiscuous mode [ 1961.913965][T27705] 8021q: adding VLAN 0 to HW filter on device bond956 01:55:11 executing program 5: r0 = 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r2, r1, 0x0, 0x100000002) ioctl$SIOCGSKNS(r1, 0x894c, &(0x7f0000000000)={'tunl0\x00', 0x600}) r3 = socket$inet_smc(0x2b, 0x1, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r3, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3a
ac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d29
3359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e39901
49a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r9}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r11, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}], 0x81, "7464fbe08eb369"}) r12 = socket$netlink(0x10, 0x3, 0xf) ioctl$sock_SIOCSIFVLAN_GET_VLAN_VID_CMD(r12, 0x8983, &(0x7f0000000040)) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r12, 0x8982, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r12, 0x81f8943c, &(0x7f0000002c40)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, r13, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6
380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744e
a61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332
b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r17}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r15}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r19, r18}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r18}, {}, {0x0, r14}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}, {}, {}, {}, {}, {}, {0x0, r14}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r18}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}, {}, {}, {}, {}, {}, {}, {0x0, r15}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}], 0x81, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r3, 0xd000943d, &(0x7f0000043f80)={0xffff, [{r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, 
{r5, r6}, {r5, r6}, {r4}, {r5}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4}, {r4, r6}, {r4}, {r5}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r4, r6}, {r5}, {r5}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4}, {r5}, {r4, r6}, {0x0, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r4}, {r5, r6}, {r4}, {r4}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5}, {r5, r6}, {r4}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4}, {r4, r6}, {r4, r6}, {r5, r6}], 0xa4, "c71a4b87d907f8"}) r21 = socket$netlink(0x10, 0x3, 0xf) ioctl$sock_SIOCSIFVLAN_GET_VLAN_VID_CMD(r21, 0x8983, &(0x7f0000000040)) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r21, 0x8982, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r21, 0x81f8943c, &(0x7f0000002c40)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, r22, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6
380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744e
a61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332
b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r26}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r24}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r28, r27}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r27}, {}, {0x0, r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}, {}, {}, {}, {}, {}, {0x0, r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r27}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}, {}, {}, {}, {}, {}, {}, {0x0, r24}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}], 0x81, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f0000044f80)={0xffffffff, [{}, {0x0, r6}, {0x0, r6}, {}, {}, {0x0, r6}, {0x0, r6}, {}, {r5}, {r5}, {r4, r6}, {r5}, {r4, r6}, {r5}, {r4}, {}, {r5}, {}, {r4}, {r5}, {r4, r6}, {0x0, r6}, {}, {r4, r6}, {r5}, {r5}, {r5}, {r5, r6}, {0x0, r6}, {r5, r6}, {}, {}, {r4}, {0x0, r6}, {0x0, r6}, {r5}, {r5, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {r5, r6}, {}, {0x0, r6}, {r4, r6}, {}, {r5}, {r5, r6}, {r5, r6}, {r5}, {0x0, r6}, {}, {}, {r5}, {0x0, r6}, {r4}, {}, {}, {r5}, {}, {r4, r6}, {}, 
{0x0, r6}, {}, {r5}, {r4, r6}, {r4}, {}, {r5, r6}, {r4}, {r5}, {r5}, {r4}, {}, {}, {0x0, r6}, {r5}, {r5}, {}, {}, {}, {r4}, {}, {r4}, {r5, r6}, {}, {r5}, {r5, r6}, {r5, r6}, {}, {r5, r6}, {r4}, {r5, r6}, {r4}, {}, {r4, r6}, {r5}, {r4, r6}, {r5}, {r5}, {0x0, r6}, {r4}, {r5}, {}, {r4}, {r5}, {r5}, {r4}, {r4}, {r5, r6}, {r5}, {r4}, {r4, r6}, {r4}, {}, {}, {r4}, {0x0, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r5}, {}, {0x0, r6}, {r4, r6}, {0x0, r6}, {}, {r4}, {}, {r4, r6}, {0x0, r6}, {r4}, {r4, r6}, {0x0, r6}, {r4, r6}, {r4}, {0x0, r6}, {}, {}, {}, {0x0, r6}, {0x0, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r5}, {r4}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5}, {}, {r5}, {0x0, r6}, {}, {}, {r5}, {r4, r6}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5}, {}, {}, {0x0, r6}, {}, {r4, r6}, {0x0, r6}, {0x0, r6}, {}, {}, {}, {r4, r6}, {}, {r4}, {}, {r5}, {r4, r6}, {r4, r6}, {}, {r4}, {r5}, {}, {}, {0x0, r6}, {}, {r4}, {0x0, r6}, {r4}, {r4}, {}, {0x0, r6}, {}, {0x0, r6}, {}, {r5, r6}, {0x0, r6}, {r4, r6}, {}, {r4}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r5}, {r4, r6}, {0x0, r6}, {}, {r4, r6}, {r5, r6}, {r5}, {}, {r5, r6}, {r5}, {r5, r6}, {r4}, {0x0, r6}, {r4, r6}, {}, {0x0, r6}, {}, {0x0, r6}, {0x0, r6}, {}, {0x0, r6}, {r4, r6}, {}, {r5}, {}, {}, {}, {}, {0x0, r6}, {r5}, {r5}, {r5, r6}, {r4}, {r4}, {r4, r6}, {r5}, {0x0, r6}, {r5, r6}, {}, {r4, r6}, {r9}, {r16, r20}, {r4, r6}, {r25, r6}], 0x7f, "432ed70badba95"}) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f66696c832f0a"], 0xb) [ 1962.102601][T27707] bond956: (slave bridge914): making interface the new active one [ 1962.131512][T27707] bridge914: entered promiscuous mode [ 1962.140746][T27707] bond956: (slave bridge914): Enslaving as an active interface with an up link 01:55:11 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6002, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:11 executing program 1: socket$inet6_tcp(0xa, 0x1, 0x0) (async) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r2, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r2, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) setsockopt$inet6_tcp_int(r2, 0x6, 0x5, &(0x7f00000001c0)=0x4, 0x4) (async) setsockopt$inet6_tcp_int(r2, 0x6, 0x5, &(0x7f00000001c0)=0x4, 0x4) socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket$netlink(0x10, 0x3, 0x0) 
sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000140)={&(0x7f00000006c0)=@newqdisc={0x254, 0x24, 0x4, 0x70bd28, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, {0xb, 0xd}, {0xd}, {0x5}}, [@qdisc_kind_options=@q_taprio={{0xb}, {0x224, 0x2, [@TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST={0x180, 0x2, 0x0, 0x1, [{0x44, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x5}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0xe870}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x1f}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x2}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x3}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x80}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x400}]}, {0x54, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x5}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x7fffffff}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0xba}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x80000001}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0xf94}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x53}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}]}, {0x24, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x40}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x7}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x80}]}, {0x14, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5}]}, {0x24, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x4}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x2}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x4}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x6}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x999}]}, {0x1c, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x8}]}, {0x54, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x40}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x400}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x9}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0xfffffffc}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x7f}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x3}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x4}]}]}, @TCA_TAPRIO_ATTR_FLAGS={0x8, 0xa, 0x80}, @TCA_TAPRIO_ATTR_FLAGS={0x8, 0xa, 0x7ff}, @TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST={0x90, 0x2, 0x0, 0x1, [{0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0xc466}]}, {0x1c, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x7fff}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x5}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x54}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x7}]}, {0x4c, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x54}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0xec}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x5}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x6}, 
@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x1000}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}]}]}]}}]}, 0x254}, 0x1, 0x0, 0x0, 0x4}, 0x1) (async) sendmsg$nl_route_sched(0xffffffffffffffff, &(0x7f0000000180)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000140)={&(0x7f00000006c0)=@newqdisc={0x254, 0x24, 0x4, 0x70bd28, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, {0xb, 0xd}, {0xd}, {0x5}}, [@qdisc_kind_options=@q_taprio={{0xb}, {0x224, 0x2, [@TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST={0x180, 0x2, 0x0, 0x1, [{0x44, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x5}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0xe870}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x1f}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x2}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x3}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x80}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x400}]}, {0x54, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x5}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x7fffffff}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0xba}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x80000001}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0xf94}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x53}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}]}, {0x24, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x40}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x7}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x80}]}, {0x14, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5}]}, {0x24, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x4}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x2}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x4}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x6}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x999}]}, {0x1c, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x8}]}, {0x54, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x40}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x400}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x9}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0xfffffffc}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x7f}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x3}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x4}]}]}, @TCA_TAPRIO_ATTR_FLAGS={0x8, 0xa, 0x80}, @TCA_TAPRIO_ATTR_FLAGS={0x8, 0xa, 0x7ff}, @TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST={0x90, 0x2, 0x0, 0x1, [{0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0xc466}]}, {0x1c, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x1}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x7fff}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x5}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x54}]}, {0xc, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x7}]}, {0x4c, 0x1, 0x0, 0x1, [@TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x6}, 
@TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x54}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0xec}, @TCA_TAPRIO_SCHED_ENTRY_INTERVAL={0x8, 0x4, 0x5}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x6}, @TCA_TAPRIO_SCHED_ENTRY_GATE_MASK={0x8, 0x3, 0x1000}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x8}, @TCA_TAPRIO_SCHED_ENTRY_CMD={0x5, 0x2, 0x1}]}]}]}}]}, 0x254}, 0x1, 0x0, 0x0, 0x4}, 0x1) r4 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r6 = bpf$ITER_CREATE(0x21, &(0x7f0000000200), 0x8) r7 = syz_genetlink_get_family_id$nl80211(&(0x7f00000002c0), r3) sendmsg$NL80211_CMD_TESTMODE(r6, &(0x7f0000000340)={&(0x7f0000000280)={0x10, 0x0, 0x0, 0x8000}, 0xc, &(0x7f0000000300)={&(0x7f00000018c0)={0x14ac, r7, 0x10, 0x70bd26, 0x25dfdbff, {{}, {@void, @void}}, [@NL80211_ATTR_TESTDATA={0xf, 0x45, "dacc742a6f7e67992aaefc"}, @NL80211_ATTR_TESTDATA={0x67, 0x45, "85ec5de96ee0d17b170c7f2b207053f448360eb7def2ad52b050782132d619eaf82ce8c415e65723e20624e64bfd6c3ec79226c9ec693ae7e3d2ffff6239d14cd77278a09c978740e5d8503ff5f7c1b25465df913c572ab821454cb0983f5466869305"}, @NL80211_ATTR_TESTDATA={0xdd, 0x45, "367bb63f8db0a5037ad24050a84fc67b8debbc319294eb2157b8f64508295273cafe87377abe77d5a437257fe9598244f701d48d55258c2150a9c88074f633922e1200ad2e1ac967c6603bcff0e8c16628deb4609e6ee3196917b9c48da9b53c773944e2599f0fba3f7f6efd6c0ae645ee2fd9ad44e61a303af62d7afd07b431a649e88406d5c09300944a7f351262401fabf2057a4c5ed6ada0fbbb4d992414d06635faea29c703b46635766252d17a31181c1d39a8f0ab52b965fa28985be35c6a640fac2e3e871f5c01da14fbedd2292685bae00ae217d1"}, @NL80211_ATTR_TESTDATA={0xf9, 0x45, "e44981d4c42b2783b84f091d67ce53b8450ebd6bbcab0e379d7c6623f606d4f5128bc9752824c8202175afcc4752c30c7260af6d652817420f872b4a261219c129bd4087538368b781dbad95c44a5ddd53e71e24644a9413f0956b0a962bd5a4d294fdc99890af9df2f5cfddef764ff5dfcd6bb36f2baa684c5d1119560985c23ac32e69b4e28169c22845fe22bca2a37bd3ef1665f1aaf904b4cc967c340efc55fc33f7a0ba38f5867aaf3afaf879b5ab3413b7d338ddf79c0e24ae62e198b476516793762c29156f1608b115adadfbd444540164073753d51e19974ee6fe5f7d48e1c23ae903f5def2c7bd91675e2044e4116723"}, @NL80211_ATTR_TESTDATA={0x5b, 0x45, "64f798d9b63d8667fe70c5bbd75741414c8278bed9f11b70738ea88fad70153877fd394a6ac5c53adddb0f9f576eac344516a35885468bacfc4c420fc14c093ac4b39e76a5129647fb6ad267dfd5355018b361fafd9429"}, @NL80211_ATTR_TESTDATA={0x43, 0x45, "2afa7c492c4b8a42b6a326131c26fce979cf27a761c0e3f6ea1e449508ecf73531f847c1ce45b7282b5a83aa411fd1aee21e9e62c2a5d0015025805ffc48cd"}, @NL80211_ATTR_TESTDATA={0x1004, 0x45, 
"4b0f166e7a5b269024697f9d3cd94b80d6b653f456d0c14cc5465a452f432faff12c0ce4be34c0e5ed0245bdfc4af39e4ba3fedc5406c832e50f437f86cfcc3fbf63e9730e89365a0317f193548d170458cd434eac218f0620043430d42042bb981c90c78dcc966df88b24bc6b46d69909fb1ad6e9786d78bd249184f73dfd9a1c9cd2a958516b5db7709361d64bafda500ac49cdde1581684ff4aa927112f6daffe3be2b0efbca97a8097e1a90b14d8ae8c49ce391c01db75b57eb1a3502facdd57de4616fe81cee9a8888f3317e3187f04cf51730ee2be2b5e9a65e0f31dae36b6f682b3946dbad10d18aae0c48e1491512989dbc8e9d25962e1350e69c574dc67fabe90add3e264004f7d6e10e3d5642962fd8c9fc0712538591509466f77567a88e7f1206951fb63394f140d32a5fee70bd94f71a4ec90564ea2cf16a1fd240eddc20bbdbaaab34ed26cc485f289e262debd6225ee2704fc1ca65feb0a4c1254223f81a4c1493e168aafdf0e1b3a344ec8c4da45a0ce4af02833e019a9b831be716b3d7dee9aa2d293a88da953aa47187116fd59b572950a45b27b6d9995d8b0880c7ea01b411a7add3d70e4a297ccd04320f417138739355792380d03acb265cacf36378dc7964d0cc0f3efd3a76ab749fab6898c2a5d96189dc7ab151c62e9ca948b50eaf25e82cc469a2509c006bcfc822eb0e0b0ca925a22e5a4772d8a3664bfd0d632bc3a39371685261c266ed9fc237b336ff7b2a42a49f9b83c2f5fa7df0623a0bbe66e0eb1293cb0a8305b4c64a72b8728564831943196d6dc785c2612665df9b3c4bd3e386d4031d847cd255e03426ab90378550f4c8b4f8d3e99f8d0591889b51391849a5880ab6ce0036548791042f18790985bbaf20ebd0d66413286f89a2a8ed08fad257dd3b7f45b309bc57205b0d3a9e6511a785ae8120d318b7fa033b3b98a869d9a3f9271f1631bfe352e0ea63c454eaab25eb3a03bb35c97d1ec8a840c125df1c2d80fa70da8cc3cb448a3b04aeb28d204fdf089f0443d60a14c21e6439af0f8eee8778565272786a876e3676ff1a82bf358c993b9bf429eac71eb595a316173032df97d26b9d4059ddbd2b2a871b8eada67b5d3fa7e4d201222fd01589f3a335e60ddd0bb342ff95215055ee6450874911d595fdfe651ce183b9941f96361dc74f525eb6eca85b0ed2e46cc0c269e7346d606a5b4aec7e4efb1154d1d03c7d37e5c6f24f267fbf55df8d8fadb4bea99a39fef22c25a8c4d3a997a94edebcae870c825b4bcf4721a81f89d86bae41f327c3b1fd0db0239f3a6832cb063227b5216a06ae3ca75ecefeea58e5bb8b6a62dfd854adf8446865b80d9f058e552d0fd89c079eed4897c2071168d59cffc3a91d1db81c7d9cb19377932363d531c7fd15fad519bd108b5545dfb01947c4ee0a2735c820d5d16af340448912581e9cfe59cd8fb605f89026292c48572929088f3ac1131a62f8c84a33cd5689949f57e78deca3b5c3db1c748d69bae285feeb7d8fb0975fa33a5fec385a200f7bd69114e0eb7261a669be2d4fdc81b490b8cbd83c1196d5276a42e4581c4610206455eb5f96df22d148eaaeb5f22200cffc22e10cd36999f839a460218fdb3bf69e9fd14d55448207e0fbf3378c1c2d0dc1e7ffb7e47a49d10e307d8777bab6144f91d8c73ae14669e00613c35113d3029b932c8de662da87dc2464508785aa2ec3d50feada9f12ae2ea8fff6cc2ca78e97dae8aef987e875416bc02190c411b295eaa5f0ea59376230967dfaa7fe20ae7396e9a49b99563a8029335f491aeba1e02ec7ae206cd73707815e46313eece6bcf9481434f783614bb20d44f1c499184f0814bccbbb83e4efbc838ebdf86b28571c78276ce532ba07b116994f882f2c24fe6a6c1728edb215b2ca63969096fe9659505a6d74ae0d9a8398f45e822767a6dffc0b1ba97940c4aaacc2053760974be30381daf40ecab62aebd0d5d2478ac3af58d71aa0755d3b39398171e5c9a04715e8f312fb42706d0ddd86e1203bc333efaf4f1a33312679693f506be47b887e36d63627954c91565e450ed4a5e5a34f08cb0254d8a1f0f79b8a33de17336eed7d6f49e9dccb4846e37c999e657b863b3651f52154c3a7cc7303dfab692fbff4891279b2e766e795cfd0e62d730452d638c853ab49d60d995f182da83ed6770c42fe9ff2ce552baf6d495bd536bd3213bcc1e42bd3d4dc090dc92eadc75188ebf8153274e8391096833426a221ddacf04432730c769243356d06f64968dfe40a2f25a02437febf2d09031eecb8e8b59b94b31dea238aa69dc0bcc53cb5bb4060360acd2e260c62190eee6a289edbffff2370e02580b7e9d73aa8559bb8583666412f096e7a43bfbd176183b1716190316bf6c4758491ac3e8a4dba18a5b0f48f0c1bb0d5ff5a9b796db14b967c14efde69867d155a66957bf5044730d575e1d620159e7bfed0eba567bd0a1f1a1dc20ebebac49e6bd4ac344
a1b08a5cb633d6e3aee467f7b25d1e839fa8b8a618d7f51cd504d5af94146fda98321f9f0759fe5f2c6aeb675c1a231fdafafa3525d9a4c1c3dd92f157c83c49f6405888cb5451d0c6bcb6c14592647d93f69e2bf89f77381f90f8bac4a79b7147dec24c2d05133e9f8ce0c62b1227c78cc4001350ac012d1dd560d0b7f06407b4f344dd9afa5aed3a8f8157d94c668ab05be99c5cdfa5884e2d678a20afc2eac9dac6293c7231c774addd5684acfaafdc3f7edbae48d24d0ad36324c2c6962778b0814587ff7f981c6643923822c2fd78eac4c300d67c48b17376a4f402f46163fb682f71350d2e1eb516124151cb067d5ea29786049812f07204ee14c0c14def92effd8ca54d7e7b1cdbfd0ec31aa11d10c28d18ea72d1d7a0889a0919f3b178a666005e9fba6280387d2978a97a532b867d7efa3b4a8b1052b876876c93dbc318db6e6b14b07bb6617a5ea6dc3673139027af402f07b6ef19d103fbc15a94ba8dad3127f8e8085fbabe6f293e03987faab5f3925a69a0c3d9407e2604a7194a6648ed7ac17131161c01462271e6399d7247afb0bb35270b7d4dbfa499dc431c9b375fb5eb9f26db767beb1c2a17f62d33fce4030b7774eea475467bb703ca1e40d50bdaeeeac5a70d17e310b4962758667c10d0e0b9cacafa087b3ca2ffcec9cbb51f7806d9dcf48356d76b1592e248058555ef7127443c210e2190dfc5768bd61a271aec68ab038659a9290f9238662aaa051790bc0deac2742417d1acbf7cbe9a1e9d61da338de5ff7e43c1afcec6310e274e5c8553c9528d2492d069ca989e94d5dc189e4ce4f711a68441c20fac640a7c4a8bff9c27ff4837a96920d8249decdde31712b69b561083601944f93d7334c5081c15be0d346a538398189ccb9f4abc4c91b9950bae419d9849248213459c6ec71f83340ea90e590e5ebadd79f5fa1204018bfce3c23e925209166f39bbc730fe2c23beb6d4314092b89c5ed4ef802ae9174a1601cf6ba059a27f8f8f1e5326b8dc1b079a08ddced375bc4a1a9d43170a2acef45a4c51e1c8a32cb029b26812ff1d8e280a5c0e77862eff5bbc0e2fc1c720399cf21a7892ce8a8fa1b509b2c86fed3a737e67ab1c6fc0370ae9e86f31059628d8742e55ac47dace24a941819bf419aa8ca7038af380372313ca2e61805c5c8aa409df64cf765ed6e6293a351ad9752b8ebfe9e62666468fb1a29b629722980a7bb802fd56fac40b3de8e72b04c755e8e95623f0199d54cbbda0a4749265c37e95f68b5ee3fcfdeb4423491e45f7971ccae8c424c8092c25e79beeb30782789a39968808b43545d2cd6da1a53b420cb87b15b5c53a53ac9dd1fde6ecee28e735837975435a7e96501c0989e3138452b70df0424d91a400bbc9f6d70ccee7290d32249c4eff3fbaae59bd624df4d24051da71a70bf67ba4e6494b61111dfd9e436a9e85472aa29d252c8fb9f232cd3f9527416760f34ad255ade28b158cb75ab2e180964abcd106e7907166f84012be61f3d37caa914e46a897f05e60f9a2cef69f10fddbf509121a6b184f8450f184a0c7c61701f5af3bd462bd4361500742202e1419920f36f87ff5dc4da17a27dea5f43e2d271427a78387fc8305c866a28975cf56a134db4d2292eeaeccc3efeb1648e883aff1b366c884e2b71efb5d95e301c1fa6aa90acc3cd14ca66bfe938214c239dfc0b21e551df76b597e885254c354d12539265498a6db4531acf40b3da542b8e4c8c7f297321cbb53462fa43ee8c3bc31dfe6dbc883ce0e9245d3ee6b5e1cdc33f55d5efa98284eb77451f65e3c359daf395eff92141f041291104a205dfa3ecfc6d9e23efcf91c10e4dc49175596f6ac07b995c961e4cc36b1cd69d3624de95e641e318d50022db57d036ae1b2bb3f245e72347bf6d24958715f1731f600fb1e746124086ed67a47104c712a2092e13eaacf803edc8a6b6f0bd13d41b59f96bf3502d06d0369dd4c7ea61150b7192393d96eeff5cb2dc97e1efae2102f30795949216bfe60edc1502e291f7a8f1f274c53254f894df2be18d1f7708823a2eaa720d0f0997dde6cae3d8d969efdcdc43635ba76299306de975e46dd3b3e1977c13fd67a2055467fb9c411b78ad39bcd16c7c5c5dedc5665d9983dc7c008d087e1fae91300b01fbdc29ac84c88a4f5a4e5e82aa231c705c9f3f3b75eafd952ea62ea8d0b9d641106f101baf780a4d76f8229456f335a0683ff23682e012546d9443dacba509357bcd90499f7760687e9385ea4d9e0e70e647aa95043bcbb24634b1ada171ac441335b766efe5a9b9486434916ff4935fd906d7add8b065261fa4231f4a1d5ecbf1681fcc22cf0223635e17c0708b7fd74cfd68c916573438a96cd2528b4988dbb8c2978c0c8d8af402e544f52348569793d45a020adc04156372841026124ec0a352068d93a90b738bc7db756c9708f592b0d29656c3c923cfaeb4b5d62fab18f89e360c4e967b10d7019f61a249db244f44
b7db4fcced2bb1f7d087a2c8e62be0d83d4f5d3abc1cacaf18b04b54fd2fcc528c02e4a604bded464970680c028335f487c450b2103ab7723893e26b178aa43b2e50cf79dc25cbb38491cdda2501659c92fc7a717c4e177b82a19753e1fa2ea02adf3045692e2bade95bd9e710935c5d9cca0ae80f258d39ca91a6d2b3ac954c56bc18f5c54d9644c1d2c55fc43520d744ac4f5e19d490d02fcc7be03b9b8f5c74f65958e3d0c959a00ae9559ba2b294cd9f773182f83aff53f59a4ab6ad714fd898ff1bed8ab5fa3b6735ba25ce4c14d91124bcd522394e1f8c1413daf587a40f1a51aef304df8f17f87c4cdd96ca5aab43ab52456b5cfd45a3a522d287a0ea7dd42d419982a66ba3b6118848a739423749c2e8bedfa0873a25704798020d6df7b457df64eab71ebc27b9e332e3faa03b557cc482e0f140a1f0e3013c606cabd8f5a804a3b7b6db6fa035e7bf7f86308c39d96cb39907382fd0efab9f8b5f0f207f788afe74bc372a23d1a255ddd6584caf97ca94b37c8a30938ee6677d329b8902a4f88d2ddbbbe2ab4b7d13d40ba557f62413333c45c8d918d60af02c75231dd78c51a27768d4baeb618525b7691fa6047dc9b152644f33ed98f7c671fa6d39c1db32e3c92cecad0567469ed9d328a6b87e98c535eeea8bab6bd91a0ed523b2b1d189d2f4d65c6d59a972dfdc5d976031d963d26652f74f9873fc142a842323c19adf9aeed83f4fc0ed3e0e9b4a48667a51a8459c67fb60c6fe782fe"}, @NL80211_ATTR_TESTDATA={0x60, 0x45, "70e224224a75037cf083b5403fb8b6d6342e0a2929418cec2d8d594dd36822945f8428fb60678209e1a34fea2b8279c13158ab9463d11ea110c0ec43e1bdbf6b6987590d7bfc299d5c770639c5e8daa24979b23d4e77578dec212305"}, @NL80211_ATTR_TESTDATA={0x8b, 0x45, "209c60aa89f8169ad61ca6fe9c4b53334a0145fc5066307b81f3a0627600038feffda5a96e2fb0c55c9082f884908afc9f2fb86423f537f445783fed659d6f7ed005d9b0dd4a98b12df872744b9da687102f78e43d81af3241629d4cfbe33e66b7ab0bd50fb300e4ae629c0021ffb6e63d4f628d949e50e5f28d29dc99acd349bb002f228ee1f9"}, @NL80211_ATTR_TESTDATA={0xb3, 0x45, "32dc7c5ee3c24ea1ba3db7fdb22d155964f32fb869362968124467bf154a478eefaf9376051b0bb6412c48fef632e4d3a5068226fca8b00559c59a3304010e1e46ea1553fc559bb9af315cb1f6c86d5793595cbf65cfc5eaa32b845410b87deb4b59638fccb0d301a31d7d4ee6b50a66d0f0f5eb9dffca168bbb2bcc27bb7d7f5d88d79b2abc6eda9dbe1d6494ab007fe786cfa101fb485bcc5e28df8ab63ef94239940371fbb4a4282866b54f9779"}]}, 0x14ac}, 0x1, 0x0, 0x0, 0x40081}, 0x20000080) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) 01:55:11 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) 01:55:12 
executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x13, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:12 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) connect$unix(r1, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r1, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_SET_COALESCE(r1, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000180)={&(0x7f00000006c0)={0x10a8, r2, 0x2, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8}, @val={0xc, 0x99, {0x7, 0x61}}}}, [@NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_DELAY={0x8, 0x1, 0x9}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x1040, 0x3, 0x0, 0x1, [{0x1008, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x1004, 0x2, 
"6d51246f3f36b059c67d0143048f151c9e8424d413f848d08bc5a909d35b95b0afb9129a4ff0f4d90c6a1536729de8ea0537be638713635786c1e40e9d52d208b5e6a034d9125a020a4741105a8e4ea52e48e3ae5333c8c65f7d445056eadcb79016c04cd7e79ae889c1f6f4cbc81f8f42b694b86562d8e715b59807e47804687a93c87b2bd2395cf8947b150ba01ccacc2fe620850804f4ae9f86db4defb2acb0fbe64d73239734fc88992fc2e8ef82688939006d6eba8abc580349b8382935d237d5d73534059b6a5f693a7139383820766b579805f0eafeb6acec953583dff0ecfb89d39e162f916d203b0455f699da7f6d3a04ff424c8a29da3eb7f467d41ed65c3fa455b1caae71e90a703111aa176f52c0589dd728b870d8fc805bc1f045e9ad95ea1e495cf457f37f9bd035811146fc70e2c7a4ba12bdac6a2ee7c2b68be7700d645d41e7bd522b37724364a4b50ffb549bddbfd13cbac91e45c04ec51d7014f5dd08a4c005a6e00e082a9294f45deb4afb391b513f62f7d0c05da67db02448ce1ff0d76f081412527eaaaeebcb4211ad0d1dd6bf753c964100f65b3fa43e96a4cd75e5678ab298746d6b3468824abc579d1d5730edbf1323fad8115569afb7c6ed01c01e274220a0ce920a3116f578fae7d36294164bdc2aa2a43a6cec73e3317ac9702240beaab32e12f47bfca69a6c336dec12de60f1c7eafa5a772ab535bb873230aea864b2ff19e6654968ad5d51a4afa23c5841760733a08978f7a5005e7da546476d0b869f1c465082dff576ebd9aaf1a438bc4faa044ba44819efaa3998d22db4f382e8213c68ba8c9728bf3a08b8081f397926ba5bed14d2f6d97f3b8b4b36864f3efecdad5bdcec4a424528ed26d82f85aeb0b4699eda424d06f83b3bf01327bb55996a6da947abb56f88cfe182485e30ed22b0a551b363d16b76eabf6820e107b74cf1d04e9c898787ecead7953b44e4ce02ef5f4a23c57e4397943f3705e1f2e3b4dfec1a67540bfe671d29c5e9d5d4bf4a0518d3fa84c4de70a3cc21305a299261e7cabf6f079cd39468397ebe1fc1c799f8a3e1bea477e2f30d8fcba1e89b7790b3afda7cc20064b6a32a41bcec07abae6b37421b38803e849285aa4a0c9cc17ce7416bd8aaef63fc98636e9044fe1af71cd5a025416eda7e4a89f0da821258f85400fbe47ec7862aaf39da1eb7decec09f15a33dd863f4686390b982b690228396ec6d982b41489ad8d96374eff6273b7f286f4c21d6f8d72f463782cd86792d4b849e1feb884a76db02e0ab26e568bfc8513060d393267cec2d226a05b05c2495f30af56873cdbf11c87f395d9cd0d592b11e14b9fa9c7ccaf27b5640ebc5dd0b19f2de1723698e93cd5a884544eab3743c9297187954cc860e0134f2e9e712c8b0c7bec5c6d68c7ec1a3e1a697bfd461de26fe1445327761dd1ee648d15a3b0764d74a41ee333a902be7fc8fa8af9b8b8cc12fe672b82aa3a5ea60e32f284b7cfd4173312ef8386c65e73f615878eb1f8c7bd8c51ed2947662d490d002a9cd252fc91c22e80947b2b77503b9e3031fe24e5a720f34fc42b1e16988c61b91cdd9857beca586c0450c1232f6dbf8062ab73676e68fd042a6177d1faae1fa7dc757c679f22b174de3b1da38dbbb8ad523329728e638db33830fad2f949ea8cd8afc310cbcf8fc526084a53541a37ed6167f8307dcec42f2084b5dfe06899b9a9d83ee8e0b5efc40c46af0cbabdbee9c7946a9f4342c1e1ba4c983e182449ade54211d833e4d870123ae09565f65df0c97b81a632a8f313dc04ce6e284e85de3b0a4b07ab0869b321d4ef71863ca17d44a9a51879dc88586c779c830c9ee725c8e87a5f4153be4dad653698ec035a3792d6450f8c638109f9c048d964268f4ba5fb6defc837498b23acef520f452979ca17006ee6e9305dc17d77e585e9e3174b7cfbdca1d7373eba06cf39e568f72cafc051ba0b46de40b52cab65d3f929d74ee6b220e21d5f3cddb0a860e63a8fbf1999cf8ecbb610695976ca2b4baee13822a242d7ba7aebf05e2bde9cbe6736c686ca09f6ee51fec64b6f03e28ebd1c6026d03ed1b321e34dccbe2aed1eefcb0d61ac6f33ab0e256d36f0ba1739931541ce8fee4c8e0bd7694f6a7a92e0767ad5aa6a6517e57d17304ee51d1735b10c103a28ac210cc9e5df14f88cb203f20b9327662c3c434ab2318fd0fca4d584f8ff4df648c309ec032131995d450760ed826d0cb193d0c8f90b060e4559d80aa1cb542dd2b0ebd8d0e9c1e5ea86e3cd927c81b3ddcdd11c9a70f1317ecdb69c5add3a30548502f5dc63f6c42bf7ae8064eccff4391edb08f2b9d8f4557548185b7bd47fa478ed2e2661c77b8b2f5bf980a89937be7fbd43fa788433ddf16de6651c817c966140ea36a4d484ee2a56d9ef13365787e9f8bdcaa01b7c326729202043a41f21b8c146bdf3170c9233403e4d8f0b9a379d3ab0360dbf4490158b2e87a772ee040e565c72f
398549437c2e2307784e7f27cb01bbf0faa4a3d63ecdb0353756d7b20e765324d38993861317d9e7d88325919ffc0e91663cac87d1a9dbbb57b565f0ce2330ebe882eedb752847eb093a8858c15cbcdbd7dd9d76579510ed2d48ee4e588a9490b45f6a3ab4025fb7b36d5c41569eefcacee805402d5799bdd86cccb88d63acfbc45d0b3050a6819af76df58ce519b4e38db9357389695a62dc53a438a628956901f1f8d3178925fee52297f20ec7e2820321044ef45eb3526a3f72776a6f6cf1e256385ed3697603f2391eb7d4a713808125c7a741449e12a90322439b12fec74a6eab617c0104cc01405d92cffaf87731fc272663c7a158c65cdcb29ba89ed03ce67a576401aeda141426ccfb466b5dc7a627fefe6a42a6319c7cfbce79c0c1d3b78d6fbdb13a19a2d7f646982d62b7dbc4c7bb72e5dddb54db7040c4bd31ea5c48ea7e75ce5ab1d51da99fa1b07a045bf3236174a77d64d4071f51397fd351d27f86cb7ebe426b93d39a6ca62afd987f0530de95c4c363593e306c0df4fbe01a57b425804e10cc343a2f91ec3886c5f9d42858899f3675795bfa252ba64cc8fc228e86e997428133a76de24435b945cae6469553c2a5da40c34ea5b09bd57ea061ca738bce857b6ea71c4f8933177b05fa4214c8991649a6f8816fe7316d53ecc3e5418f47926b12ec940ff42b3e9c5690c6ffe301b9c213fb87ec64b26114eeeaae1de5fcd22aec4144d786da50261479c59136cb5377f2dd7b3adb9fb0b9a61689cc6c9d5c760baa1b1b86ab24c27997deff01c0b6755b94f3835a040d9428a4e8fcbf56bdce95f85543ebc81d0ddfa54943315e48d2e0b65d8e0568e0b489a436a09097aef5b3542eeb53f46fcdf57655c5992ff47ac16b86a8e2f94ed121128c43f56fab9be77ed0cf681c85621dc6de85cf5b096782992afeb51d236f47e7b09d81d08b3fb001b4bb81e88124a59999e51bf98470c922f6c66cd9c66da53520fb98b37959489c04a4439f66a366a5a2f7ca258c5de83a60a9b2352fdbd9102a93ba49330db79c8567899b14fb0d22366eabeaaa6fbfb7253b388995408d4f63de8908009fe4abee95b347346436f339a37b994e3c695eb9ae47f2cb327c4dd899faadce46b011944fe2ef1f32df6dbdca110bb005792eedb72e85726f3124f85b01068db7b5b827c37e527b3f9050ca4cc360af842b71aaf0ec8a9959f359fcae45df395ce6078359d571c6a75ab1a3350bd23de0922f4ffd6414b6438583e51377d3489651b8b11ed645b12a96e9cefe28d34b007757be948c9907e2aa08445454290ddf852b39ccaf2f217f626b482445fe355f47d053ed3a543a54a32c6517a45e641843638f9cf06a714572d513b6bf5c16d297780912993f8080fb1bca2245daf1c09718cc4ce239fb28b00168b332f0bfac69b1c1b00dd664111d9ba81f06ee6cde172d996971f1cf6026d4e4b6a02ee3416232509e3188c5d4c60066c4710114bce486ad42e25e6ed12dbf61a13210f6753e32807c814f420cf6c4af4d12a476c2f20e3ee2e26c21c9fff724dc5cbbc8a1b33da58c75df6769293e6db048958a9bc56f4831ca3cb0add3b9ea327998572c3a61425ef9852b6cbde37d0fd26748498aad4bd6a889d62fda6483e7d42d49a32a443b0b23e3d9d9ce7fe58e55e5a2e0f8cd9baa5daaa6e687f44c75b39dfced96e2ba3f348fe3ab24990bd8bcf3f4e8b375bba6e2aa222fcaf9f1fe4698d82684ca0c72963f3b9358fe1809266bf8fd506747712e1fa19d46daa19a7d4e42745ac217816a06a8420f609fea84be2f70ae1d6d43998ffa09abb680c3198b5e36010ef30543375e749546cf7d6919882bdd66d7943e972c2c7a68f730e9fd5efdf744306078123a47e157521beb651eae11c3990917964a502e2d325a38bd1335791749766b78adaf1862d5ade55d5aac4ca1d358d3aa449d5c62e2e94996d2f4f97f1e8f24966f2dfaac52e4df48de073dcfb1244d18b80ad97bad8f1a826dc2777864795b1bf80754d3bf84ee61e5ff5c5107b95d9e22692a99cba9ee0469cd52a3bd8834f6049a3f83a29155edfae2f4193c8fb0fb373e5cc7cf16abfad48717037320ba98eda8b3b00d69d28f2e29de35e9e2b74769d27d27fd40dc716c6f550ffcd40b35014ba68b2ff5c48a2ec7a0938ec6b08c850bae5885e4e8b91e7641010aa898d3e39e3511ce25217ea3a29a90aae370b57dd14aa8fd8fd980ec1fa4e77870d3be172747448b3c0bdbbef616c3911299733aea494a0b850cab128760d374a7df7ca868ebd64d3949a59b2b621381ba683bd8d383fd1975dcc99ebcdf9c3f04e1987f0a9ec32753ea5e53b67ba6c200ed93679ced5b4d84f5d6bd1c3abd4b32749025aecf88c9c03390455fd0b55a47e67de4f7ff78af1c40b602983d01de7d99c5084ca89749143f799f2f3d9ad588c5e5a3d75329b61c29ac19344436773515dc67f521dad7a17ef4445f3462da1caa91c8c80c3
698e1961c32596e1a410cc59021375263b552267f52653cf1ba597eb21b883ff201e293c078c4aa5ad2c3569438a9a4b93141d8dda2d2ecfda7ed52bb02ae97271015bdfaa5276ca6024122ac48d6cf62b223fbd17e3c87ec58a78f745c5e01e8df088b4d5018b92d45fb1842b64a4ae93490443c1bc0a5cd170d36da85531f6c9a66c4a7db9c395518c4f2c7b06d19537a838f64f57c23b6307d9c0dc5f97396f41151485c4bfbab063b1327da49f4407738ea1385cfe0d4c763c9bee053db02e3027d47c6bf771a2cc6bf932cfd5ad42695799a634e39105ab128dd6265af8a8093ab116e98af5b2af91e34129d84a50e966408b45603f65bc54ecde64071cf6af124ebb6bcd799dc3637a2981c9bccbb86427c1b8fab76d39fabe8fa4bd01d0ca456904ca408fa7aee50824c5b0ce28357cf8da36cdde6ea83be8ed2947173d0dc3cad64b9d6601e62854063b65ea490e3a4ec24835d5689b9888b7ec6fdcda38f956741d44c84c064dd91c003e931dea393ba15f0ab47f3f6889e4e64a3d0ce1d4962b35dc2b075afe01978bb64ed91bce56749752de33988796f98fa1617e254047b8499639b27043cb3350d3efed3ffed103f720e8cbea0ae4e122493ffc683e3fc9be5a3557ba7df7127cc2a283b2ca7187228d2f8d495019766b0fa744a025e185742eb78153b4aa7e80128d6de6af1fec8ea3f715c178c86d100f9d22a036d2225ee144e29cf56737cb6c93b8f9d82694a877232e0c3b31c9a"}}, {0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7f}}, {0x28, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x22, 0x2, "f5bf8a3fdd833fd4f35aa085c2ffad03e9c1e8a3551cff5f2683f36f4815"}}]}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x10, 0x3, 0x0, 0x1, [{0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7}}]}]}, 0x10a8}, 0x1, 0x0, 0x0, 0x10}, 0x1) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r3, 0x0) accept4(r3, 0x0, 0x0, 0x0) r4 = socket$netlink(0x10, 0x3, 0x0) r5 = socket$netlink(0x10, 0x3, 0x0) r6 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r6, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r6, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r7, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r4, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r7}]}, 0x4c}}, 0x0) [ 1962.156975][T27708] workqueue: Failed to create a rescuer kthread for wq "bond904": -EINTR [ 1962.443597][T27714] bond1009: entered promiscuous mode [ 1962.478504][T27714] 8021q: adding VLAN 0 to HW filter on device bond1009 [ 1962.547832][T27715] bond1009: (slave bridge974): making interface the new active one [ 1962.558450][T27715] bridge974: entered promiscuous mode [ 1962.568438][T27715] bond1009: (slave bridge974): Enslaving as an active interface with an up link 01:55:12 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) 
sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3ea, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1962.666950][T27731] bond957: entered promiscuous mode [ 1962.685694][T27731] 8021q: adding VLAN 0 to HW filter on device bond957 01:55:12 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x608a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1962.781916][T27733] bond957: (slave bridge915): making interface the new active one [ 1962.790174][T27733] bridge915: entered promiscuous mode [ 1962.806446][T27733] bond957: (slave bridge915): Enslaving as an active interface with an up link [ 1962.927180][T27741] validate_nla: 10 callbacks suppressed [ 1962.927199][T27741] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 01:55:12 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) [ 1962.998583][T27741] bond904: entered promiscuous mode [ 1963.004609][T27741] 8021q: adding VLAN 0 to HW filter on device bond904 [ 1963.096521][T27744] bond904: (slave bridge871): making interface the new active one [ 1963.104964][T27744] bridge871: entered promiscuous mode [ 1963.114674][T27744] bond904: (slave bridge871): Enslaving as an active interface with an up link [ 1963.131145][T27747] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
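[editor's note, not part of the captured log] The reproducers in this log repeat one rtnetlink pattern: open a NETLINK_ROUTE socket, recover the ifindex of an auto-created interface, then send an RTM_NEWLINK message carrying IFLA_LINKINFO (kind "bond"/"bridge") together with IFLA_MASTER, which is what produces the "entered promiscuous mode" and "Enslaving as an active interface with an up link" console messages above. The following is a minimal hand-written C sketch of just that enslave step, for orientation only; it is not the syzkaller reproducer, and the interface names "bridge0" and "bond0" are placeholders.

/*
 * Sketch: enslave an existing interface under a bond by sending
 * RTM_NEWLINK with an IFLA_MASTER attribute over NETLINK_ROUTE.
 * "bridge0" / "bond0" are assumed names, not from the reproducer.
 */
#include <stdio.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
    int slave  = if_nametoindex("bridge0");  /* device to enslave      */
    int master = if_nametoindex("bond0");    /* bond acting as master  */
    if (!slave || !master) {
        perror("if_nametoindex");
        return 1;
    }

    struct {
        struct nlmsghdr  nh;   /* netlink header                      */
        struct ifinfomsg ifi;  /* identifies the slave by ifindex     */
        struct rtattr    rta;  /* IFLA_MASTER attribute header        */
        int              master_idx;
    } req = {
        .nh = {
            .nlmsg_len   = sizeof(req),
            .nlmsg_type  = RTM_NEWLINK,
            .nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
        },
        .ifi = { .ifi_family = AF_UNSPEC, .ifi_index = slave },
        .rta = { .rta_len = RTA_LENGTH(sizeof(int)), .rta_type = IFLA_MASTER },
        .master_idx = master,
    };

    int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
    if (fd < 0 || send(fd, &req, sizeof(req), 0) < 0) {
        perror("netlink");
        return 1;
    }
    close(fd);
    return 0;
}

[end of editor's note; captured log continues]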
01:55:12 executing program 5: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r2 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r2, r1, 0x0, 0x100000002) ioctl$SIOCGSKNS(r1, 0x894c, &(0x7f0000000000)={'tunl0\x00', 0x600}) (async) ioctl$SIOCGSKNS(r1, 0x894c, &(0x7f0000000000)={'tunl0\x00', 0x600}) r3 = socket$inet_smc(0x2b, 0x1, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r3, 0x81f8943c, &(0x7f0000000100)) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r3, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75
d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161
fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e39901
49a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3
663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f71
7a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r9}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r11, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}], 0x81, "7464fbe08eb369"}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r9}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r11, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, 
{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}], 0x81, "7464fbe08eb369"}) r12 = socket$netlink(0x10, 0x3, 0xf) ioctl$sock_SIOCSIFVLAN_GET_VLAN_VID_CMD(r12, 0x8983, &(0x7f0000000040)) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r12, 0x8982, 0x0) (async) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r12, 0x8982, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r12, 0x81f8943c, &(0x7f0000002c40)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, r13, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b32
8e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f9
7a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e39901
49a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3
663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f71
7a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r17}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r15}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r19, r18}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r18}, {}, {0x0, r14}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}, {}, {}, {}, {}, {}, {0x0, r14}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r18}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}, {}, {}, {}, {}, {}, {}, {0x0, r15}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}], 0x81, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r3, 0xd000943d, &(0x7f0000043f80)={0xffff, [{r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4}, {r5}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, 
{r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4}, {r4, r6}, {r4}, {r5}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r4, r6}, {r5}, {r5}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4}, {r5}, {r4, r6}, {0x0, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r4}, {r5, r6}, {r4}, {r4}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5}, {r5, r6}, {r4}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4}, {r4, r6}, {r4, r6}, {r5, r6}], 0xa4, "c71a4b87d907f8"}) r21 = socket$netlink(0x10, 0x3, 0xf) ioctl$sock_SIOCSIFVLAN_GET_VLAN_VID_CMD(r21, 0x8983, &(0x7f0000000040)) (async) ioctl$sock_SIOCSIFVLAN_GET_VLAN_VID_CMD(r21, 0x8983, &(0x7f0000000040)) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r21, 0x8982, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r21, 0x81f8943c, &(0x7f0000002c40)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, r22, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6
380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c6771
0dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec8813
5a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r26}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r24}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r28, r27}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r27}, {}, {0x0, r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}, {}, {}, {}, {}, {}, {0x0, r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r27}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}, {}, {}, {}, {}, {}, {}, {0x0, r24}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}], 0x81, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f0000044f80)={0xffffffff, [{}, {0x0, r6}, {0x0, r6}, {}, {}, {0x0, r6}, {0x0, r6}, {}, {r5}, {r5}, {r4, r6}, {r5}, {r4, r6}, {r5}, {r4}, {}, {r5}, {}, {r4}, {r5}, {r4, r6}, {0x0, r6}, {}, {r4, r6}, {r5}, {r5}, {r5}, {r5, r6}, {0x0, r6}, {r5, r6}, {}, {}, {r4}, {0x0, r6}, {0x0, r6}, {r5}, {r5, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {r5, r6}, {}, {0x0, r6}, {r4, r6}, {}, {r5}, {r5, 
r6}, {r5, r6}, {r5}, {0x0, r6}, {}, {}, {r5}, {0x0, r6}, {r4}, {}, {}, {r5}, {}, {r4, r6}, {}, {0x0, r6}, {}, {r5}, {r4, r6}, {r4}, {}, {r5, r6}, {r4}, {r5}, {r5}, {r4}, {}, {}, {0x0, r6}, {r5}, {r5}, {}, {}, {}, {r4}, {}, {r4}, {r5, r6}, {}, {r5}, {r5, r6}, {r5, r6}, {}, {r5, r6}, {r4}, {r5, r6}, {r4}, {}, {r4, r6}, {r5}, {r4, r6}, {r5}, {r5}, {0x0, r6}, {r4}, {r5}, {}, {r4}, {r5}, {r5}, {r4}, {r4}, {r5, r6}, {r5}, {r4}, {r4, r6}, {r4}, {}, {}, {r4}, {0x0, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r5}, {}, {0x0, r6}, {r4, r6}, {0x0, r6}, {}, {r4}, {}, {r4, r6}, {0x0, r6}, {r4}, {r4, r6}, {0x0, r6}, {r4, r6}, {r4}, {0x0, r6}, {}, {}, {}, {0x0, r6}, {0x0, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r5}, {r4}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5}, {}, {r5}, {0x0, r6}, {}, {}, {r5}, {r4, r6}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5}, {}, {}, {0x0, r6}, {}, {r4, r6}, {0x0, r6}, {0x0, r6}, {}, {}, {}, {r4, r6}, {}, {r4}, {}, {r5}, {r4, r6}, {r4, r6}, {}, {r4}, {r5}, {}, {}, {0x0, r6}, {}, {r4}, {0x0, r6}, {r4}, {r4}, {}, {0x0, r6}, {}, {0x0, r6}, {}, {r5, r6}, {0x0, r6}, {r4, r6}, {}, {r4}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r5}, {r4, r6}, {0x0, r6}, {}, {r4, r6}, {r5, r6}, {r5}, {}, {r5, r6}, {r5}, {r5, r6}, {r4}, {0x0, r6}, {r4, r6}, {}, {0x0, r6}, {}, {0x0, r6}, {0x0, r6}, {}, {0x0, r6}, {r4, r6}, {}, {r5}, {}, {}, {}, {}, {0x0, r6}, {r5}, {r5}, {r5, r6}, {r4}, {r4}, {r4, r6}, {r5}, {0x0, r6}, {r5, r6}, {}, {r4, r6}, {r9}, {r16, r20}, {r4, r6}, {r25, r6}], 0x7f, "432ed70badba95"}) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f66696c832f0a"], 0xb) 01:55:12 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x18, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1963.234062][ T27] audit: type=1804 audit(1691718912.882:366): pid=27762 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.5" name="/root/syzkaller-testdir4114978858/syzkaller.OpzNfI/3112/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 1963.247648][T27747] bond524: entered promiscuous mode [ 1963.269622][ T27] audit: type=1804 audit(1691718912.912:367): pid=27763 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.5" name="/root/syzkaller-testdir4114978858/syzkaller.OpzNfI/3112/cgroup.controllers" dev="sda1" ino=1971 res=1 errno=0 [ 1963.311364][T27747] 8021q: adding VLAN 0 to HW filter on device bond524 01:55:13 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) connect$unix(r1, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r1, 
&(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_SET_COALESCE(r1, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000180)={&(0x7f00000006c0)={0x10a8, r2, 0x2, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8}, @val={0xc, 0x99, {0x7, 0x61}}}}, [@NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_DELAY={0x8, 0x1, 0x9}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x1040, 0x3, 0x0, 0x1, [{0x1008, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x1004, 0x2, "6d51246f3f36b059c67d0143048f151c9e8424d413f848d08bc5a909d35b95b0afb9129a4ff0f4d90c6a1536729de8ea0537be638713635786c1e40e9d52d208b5e6a034d9125a020a4741105a8e4ea52e48e3ae5333c8c65f7d445056eadcb79016c04cd7e79ae889c1f6f4cbc81f8f42b694b86562d8e715b59807e47804687a93c87b2bd2395cf8947b150ba01ccacc2fe620850804f4ae9f86db4defb2acb0fbe64d73239734fc88992fc2e8ef82688939006d6eba8abc580349b8382935d237d5d73534059b6a5f693a7139383820766b579805f0eafeb6acec953583dff0ecfb89d39e162f916d203b0455f699da7f6d3a04ff424c8a29da3eb7f467d41ed65c3fa455b1caae71e90a703111aa176f52c0589dd728b870d8fc805bc1f045e9ad95ea1e495cf457f37f9bd035811146fc70e2c7a4ba12bdac6a2ee7c2b68be7700d645d41e7bd522b37724364a4b50ffb549bddbfd13cbac91e45c04ec51d7014f5dd08a4c005a6e00e082a9294f45deb4afb391b513f62f7d0c05da67db02448ce1ff0d76f081412527eaaaeebcb4211ad0d1dd6bf753c964100f65b3fa43e96a4cd75e5678ab298746d6b3468824abc579d1d5730edbf1323fad8115569afb7c6ed01c01e274220a0ce920a3116f578fae7d36294164bdc2aa2a43a6cec73e3317ac9702240beaab32e12f47bfca69a6c336dec12de60f1c7eafa5a772ab535bb873230aea864b2ff19e6654968ad5d51a4afa23c5841760733a08978f7a5005e7da546476d0b869f1c465082dff576ebd9aaf1a438bc4faa044ba44819efaa3998d22db4f382e8213c68ba8c9728bf3a08b8081f397926ba5bed14d2f6d97f3b8b4b36864f3efecdad5bdcec4a424528ed26d82f85aeb0b4699eda424d06f83b3bf01327bb55996a6da947abb56f88cfe182485e30ed22b0a551b363d16b76eabf6820e107b74cf1d04e9c898787ecead7953b44e4ce02ef5f4a23c57e4397943f3705e1f2e3b4dfec1a67540bfe671d29c5e9d5d4bf4a0518d3fa84c4de70a3cc21305a299261e7cabf6f079cd39468397ebe1fc1c799f8a3e1bea477e2f30d8fcba1e89b7790b3afda7cc20064b6a32a41bcec07abae6b37421b38803e849285aa4a0c9cc17ce7416bd8aaef63fc98636e9044fe1af71cd5a025416eda7e4a89f0da821258f85400fbe47ec7862aaf39da1eb7decec09f15a33dd863f4686390b982b690228396ec6d982b41489ad8d96374eff6273b7f286f4c21d6f8d72f463782cd86792d4b849e1feb884a76db02e0ab26e568bfc8513060d393267cec2d226a05b05c2495f30af56873cdbf11c87f395d9cd0d592b11e14b9fa9c7ccaf27b5640ebc5dd0b19f2de1723698e93cd5a884544eab3743c9297187954cc860e0134f2e9e712c8b0c7bec5c6d68c7ec1a3e1a697bfd461de26fe1445327761dd1ee648d15a3b0764d74a41ee333a902be7fc8fa8af9b8b8cc12fe672b82aa3a5ea60e32f284b7cfd4173312ef8386c65e73f615878eb1f8c7bd8c51ed2947662d490d002a9cd252fc91c22e80947b2b77503b9e3031fe24e5a720f34fc42b1e16988c61b91cdd9857beca586c0450c1232f6dbf8062ab73676e68fd042a6177d1faae1fa7dc757c679f22b174de3b1da38dbbb8ad523329728e638db33830fad2f949ea8cd8afc310cbcf8fc526084a53541a37ed6167f8307dcec42f2084b5dfe06899b9a9d83ee8e0b5efc40c46af0cbabdbee9c7946a9f4342c1e1ba4c983e182449ade54211d833e4d870123ae09565f65df0c97b81a632a8f313dc04ce6e284e85de3b0a4b07ab0869b321d4ef71863ca17d44a9a51879dc88586c779c830c9ee725c8e87a5f4153be4dad653698ec035a3792d6450f8c638109f9c048d964268f4ba5fb6defc837498b23acef520f45
2979ca17006ee6e9305dc17d77e585e9e3174b7cfbdca1d7373eba06cf39e568f72cafc051ba0b46de40b52cab65d3f929d74ee6b220e21d5f3cddb0a860e63a8fbf1999cf8ecbb610695976ca2b4baee13822a242d7ba7aebf05e2bde9cbe6736c686ca09f6ee51fec64b6f03e28ebd1c6026d03ed1b321e34dccbe2aed1eefcb0d61ac6f33ab0e256d36f0ba1739931541ce8fee4c8e0bd7694f6a7a92e0767ad5aa6a6517e57d17304ee51d1735b10c103a28ac210cc9e5df14f88cb203f20b9327662c3c434ab2318fd0fca4d584f8ff4df648c309ec032131995d450760ed826d0cb193d0c8f90b060e4559d80aa1cb542dd2b0ebd8d0e9c1e5ea86e3cd927c81b3ddcdd11c9a70f1317ecdb69c5add3a30548502f5dc63f6c42bf7ae8064eccff4391edb08f2b9d8f4557548185b7bd47fa478ed2e2661c77b8b2f5bf980a89937be7fbd43fa788433ddf16de6651c817c966140ea36a4d484ee2a56d9ef13365787e9f8bdcaa01b7c326729202043a41f21b8c146bdf3170c9233403e4d8f0b9a379d3ab0360dbf4490158b2e87a772ee040e565c72f398549437c2e2307784e7f27cb01bbf0faa4a3d63ecdb0353756d7b20e765324d38993861317d9e7d88325919ffc0e91663cac87d1a9dbbb57b565f0ce2330ebe882eedb752847eb093a8858c15cbcdbd7dd9d76579510ed2d48ee4e588a9490b45f6a3ab4025fb7b36d5c41569eefcacee805402d5799bdd86cccb88d63acfbc45d0b3050a6819af76df58ce519b4e38db9357389695a62dc53a438a628956901f1f8d3178925fee52297f20ec7e2820321044ef45eb3526a3f72776a6f6cf1e256385ed3697603f2391eb7d4a713808125c7a741449e12a90322439b12fec74a6eab617c0104cc01405d92cffaf87731fc272663c7a158c65cdcb29ba89ed03ce67a576401aeda141426ccfb466b5dc7a627fefe6a42a6319c7cfbce79c0c1d3b78d6fbdb13a19a2d7f646982d62b7dbc4c7bb72e5dddb54db7040c4bd31ea5c48ea7e75ce5ab1d51da99fa1b07a045bf3236174a77d64d4071f51397fd351d27f86cb7ebe426b93d39a6ca62afd987f0530de95c4c363593e306c0df4fbe01a57b425804e10cc343a2f91ec3886c5f9d42858899f3675795bfa252ba64cc8fc228e86e997428133a76de24435b945cae6469553c2a5da40c34ea5b09bd57ea061ca738bce857b6ea71c4f8933177b05fa4214c8991649a6f8816fe7316d53ecc3e5418f47926b12ec940ff42b3e9c5690c6ffe301b9c213fb87ec64b26114eeeaae1de5fcd22aec4144d786da50261479c59136cb5377f2dd7b3adb9fb0b9a61689cc6c9d5c760baa1b1b86ab24c27997deff01c0b6755b94f3835a040d9428a4e8fcbf56bdce95f85543ebc81d0ddfa54943315e48d2e0b65d8e0568e0b489a436a09097aef5b3542eeb53f46fcdf57655c5992ff47ac16b86a8e2f94ed121128c43f56fab9be77ed0cf681c85621dc6de85cf5b096782992afeb51d236f47e7b09d81d08b3fb001b4bb81e88124a59999e51bf98470c922f6c66cd9c66da53520fb98b37959489c04a4439f66a366a5a2f7ca258c5de83a60a9b2352fdbd9102a93ba49330db79c8567899b14fb0d22366eabeaaa6fbfb7253b388995408d4f63de8908009fe4abee95b347346436f339a37b994e3c695eb9ae47f2cb327c4dd899faadce46b011944fe2ef1f32df6dbdca110bb005792eedb72e85726f3124f85b01068db7b5b827c37e527b3f9050ca4cc360af842b71aaf0ec8a9959f359fcae45df395ce6078359d571c6a75ab1a3350bd23de0922f4ffd6414b6438583e51377d3489651b8b11ed645b12a96e9cefe28d34b007757be948c9907e2aa08445454290ddf852b39ccaf2f217f626b482445fe355f47d053ed3a543a54a32c6517a45e641843638f9cf06a714572d513b6bf5c16d297780912993f8080fb1bca2245daf1c09718cc4ce239fb28b00168b332f0bfac69b1c1b00dd664111d9ba81f06ee6cde172d996971f1cf6026d4e4b6a02ee3416232509e3188c5d4c60066c4710114bce486ad42e25e6ed12dbf61a13210f6753e32807c814f420cf6c4af4d12a476c2f20e3ee2e26c21c9fff724dc5cbbc8a1b33da58c75df6769293e6db048958a9bc56f4831ca3cb0add3b9ea327998572c3a61425ef9852b6cbde37d0fd26748498aad4bd6a889d62fda6483e7d42d49a32a443b0b23e3d9d9ce7fe58e55e5a2e0f8cd9baa5daaa6e687f44c75b39dfced96e2ba3f348fe3ab24990bd8bcf3f4e8b375bba6e2aa222fcaf9f1fe4698d82684ca0c72963f3b9358fe1809266bf8fd506747712e1fa19d46daa19a7d4e42745ac217816a06a8420f609fea84be2f70ae1d6d43998ffa09abb680c3198b5e36010ef30543375e749546cf7d6919882bdd66d7943e972c2c7a68f730e9fd5efdf744306078123a47e157521beb651eae11c3990917964a502e2d325a38bd1335
791749766b78adaf1862d5ade55d5aac4ca1d358d3aa449d5c62e2e94996d2f4f97f1e8f24966f2dfaac52e4df48de073dcfb1244d18b80ad97bad8f1a826dc2777864795b1bf80754d3bf84ee61e5ff5c5107b95d9e22692a99cba9ee0469cd52a3bd8834f6049a3f83a29155edfae2f4193c8fb0fb373e5cc7cf16abfad48717037320ba98eda8b3b00d69d28f2e29de35e9e2b74769d27d27fd40dc716c6f550ffcd40b35014ba68b2ff5c48a2ec7a0938ec6b08c850bae5885e4e8b91e7641010aa898d3e39e3511ce25217ea3a29a90aae370b57dd14aa8fd8fd980ec1fa4e77870d3be172747448b3c0bdbbef616c3911299733aea494a0b850cab128760d374a7df7ca868ebd64d3949a59b2b621381ba683bd8d383fd1975dcc99ebcdf9c3f04e1987f0a9ec32753ea5e53b67ba6c200ed93679ced5b4d84f5d6bd1c3abd4b32749025aecf88c9c03390455fd0b55a47e67de4f7ff78af1c40b602983d01de7d99c5084ca89749143f799f2f3d9ad588c5e5a3d75329b61c29ac19344436773515dc67f521dad7a17ef4445f3462da1caa91c8c80c3698e1961c32596e1a410cc59021375263b552267f52653cf1ba597eb21b883ff201e293c078c4aa5ad2c3569438a9a4b93141d8dda2d2ecfda7ed52bb02ae97271015bdfaa5276ca6024122ac48d6cf62b223fbd17e3c87ec58a78f745c5e01e8df088b4d5018b92d45fb1842b64a4ae93490443c1bc0a5cd170d36da85531f6c9a66c4a7db9c395518c4f2c7b06d19537a838f64f57c23b6307d9c0dc5f97396f41151485c4bfbab063b1327da49f4407738ea1385cfe0d4c763c9bee053db02e3027d47c6bf771a2cc6bf932cfd5ad42695799a634e39105ab128dd6265af8a8093ab116e98af5b2af91e34129d84a50e966408b45603f65bc54ecde64071cf6af124ebb6bcd799dc3637a2981c9bccbb86427c1b8fab76d39fabe8fa4bd01d0ca456904ca408fa7aee50824c5b0ce28357cf8da36cdde6ea83be8ed2947173d0dc3cad64b9d6601e62854063b65ea490e3a4ec24835d5689b9888b7ec6fdcda38f956741d44c84c064dd91c003e931dea393ba15f0ab47f3f6889e4e64a3d0ce1d4962b35dc2b075afe01978bb64ed91bce56749752de33988796f98fa1617e254047b8499639b27043cb3350d3efed3ffed103f720e8cbea0ae4e122493ffc683e3fc9be5a3557ba7df7127cc2a283b2ca7187228d2f8d495019766b0fa744a025e185742eb78153b4aa7e80128d6de6af1fec8ea3f715c178c86d100f9d22a036d2225ee144e29cf56737cb6c93b8f9d82694a877232e0c3b31c9a"}}, {0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7f}}, {0x28, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x22, 0x2, "f5bf8a3fdd833fd4f35aa085c2ffad03e9c1e8a3551cff5f2683f36f4815"}}]}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x10, 0x3, 0x0, 0x1, [{0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7}}]}]}, 0x10a8}, 0x1, 0x0, 0x0, 0x10}, 0x1) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r3, 0x0) accept4(r3, 0x0, 0x0, 0x0) r4 = socket$netlink(0x10, 0x3, 0x0) (async) r5 = socket$netlink(0x10, 0x3, 0x0) (async) r6 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r6, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r6, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r7, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async, rerun: 32) sendmsg$nl_route(r4, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r7}]}, 0x4c}}, 0x0) (rerun: 32) [ 1963.383166][T27748] bond524: (slave bridge466): making interface the new active one [ 1963.392063][T27748] bridge466: entered promiscuous mode [ 1963.405094][T27748] bond524: (slave bridge466): Enslaving as 
an active interface with an up link [ 1963.417597][T27750] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 1963.478337][T27750] bond1010: entered promiscuous mode [ 1963.484100][T27750] 8021q: adding VLAN 0 to HW filter on device bond1010 [ 1963.537688][T27751] bond1010: (slave bridge975): making interface the new active one [ 1963.545983][T27751] bridge975: entered promiscuous mode [ 1963.555924][T27751] bond1010: (slave bridge975): Enslaving as an active interface with an up link 01:55:13 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3f2, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1963.586769][T27753] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:13 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) connect$unix(r1, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r1, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) r2 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) sendmsg$NL80211_CMD_SET_COALESCE(r1, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000180)={&(0x7f00000006c0)={0x10a8, r2, 0x2, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8}, @val={0xc, 0x99, {0x7, 0x61}}}}, [@NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_DELAY={0x8, 0x1, 0x9}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x1040, 0x3, 0x0, 0x1, [{0x1008, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x1004, 0x2, 
"6d51246f3f36b059c67d0143048f151c9e8424d413f848d08bc5a909d35b95b0afb9129a4ff0f4d90c6a1536729de8ea0537be638713635786c1e40e9d52d208b5e6a034d9125a020a4741105a8e4ea52e48e3ae5333c8c65f7d445056eadcb79016c04cd7e79ae889c1f6f4cbc81f8f42b694b86562d8e715b59807e47804687a93c87b2bd2395cf8947b150ba01ccacc2fe620850804f4ae9f86db4defb2acb0fbe64d73239734fc88992fc2e8ef82688939006d6eba8abc580349b8382935d237d5d73534059b6a5f693a7139383820766b579805f0eafeb6acec953583dff0ecfb89d39e162f916d203b0455f699da7f6d3a04ff424c8a29da3eb7f467d41ed65c3fa455b1caae71e90a703111aa176f52c0589dd728b870d8fc805bc1f045e9ad95ea1e495cf457f37f9bd035811146fc70e2c7a4ba12bdac6a2ee7c2b68be7700d645d41e7bd522b37724364a4b50ffb549bddbfd13cbac91e45c04ec51d7014f5dd08a4c005a6e00e082a9294f45deb4afb391b513f62f7d0c05da67db02448ce1ff0d76f081412527eaaaeebcb4211ad0d1dd6bf753c964100f65b3fa43e96a4cd75e5678ab298746d6b3468824abc579d1d5730edbf1323fad8115569afb7c6ed01c01e274220a0ce920a3116f578fae7d36294164bdc2aa2a43a6cec73e3317ac9702240beaab32e12f47bfca69a6c336dec12de60f1c7eafa5a772ab535bb873230aea864b2ff19e6654968ad5d51a4afa23c5841760733a08978f7a5005e7da546476d0b869f1c465082dff576ebd9aaf1a438bc4faa044ba44819efaa3998d22db4f382e8213c68ba8c9728bf3a08b8081f397926ba5bed14d2f6d97f3b8b4b36864f3efecdad5bdcec4a424528ed26d82f85aeb0b4699eda424d06f83b3bf01327bb55996a6da947abb56f88cfe182485e30ed22b0a551b363d16b76eabf6820e107b74cf1d04e9c898787ecead7953b44e4ce02ef5f4a23c57e4397943f3705e1f2e3b4dfec1a67540bfe671d29c5e9d5d4bf4a0518d3fa84c4de70a3cc21305a299261e7cabf6f079cd39468397ebe1fc1c799f8a3e1bea477e2f30d8fcba1e89b7790b3afda7cc20064b6a32a41bcec07abae6b37421b38803e849285aa4a0c9cc17ce7416bd8aaef63fc98636e9044fe1af71cd5a025416eda7e4a89f0da821258f85400fbe47ec7862aaf39da1eb7decec09f15a33dd863f4686390b982b690228396ec6d982b41489ad8d96374eff6273b7f286f4c21d6f8d72f463782cd86792d4b849e1feb884a76db02e0ab26e568bfc8513060d393267cec2d226a05b05c2495f30af56873cdbf11c87f395d9cd0d592b11e14b9fa9c7ccaf27b5640ebc5dd0b19f2de1723698e93cd5a884544eab3743c9297187954cc860e0134f2e9e712c8b0c7bec5c6d68c7ec1a3e1a697bfd461de26fe1445327761dd1ee648d15a3b0764d74a41ee333a902be7fc8fa8af9b8b8cc12fe672b82aa3a5ea60e32f284b7cfd4173312ef8386c65e73f615878eb1f8c7bd8c51ed2947662d490d002a9cd252fc91c22e80947b2b77503b9e3031fe24e5a720f34fc42b1e16988c61b91cdd9857beca586c0450c1232f6dbf8062ab73676e68fd042a6177d1faae1fa7dc757c679f22b174de3b1da38dbbb8ad523329728e638db33830fad2f949ea8cd8afc310cbcf8fc526084a53541a37ed6167f8307dcec42f2084b5dfe06899b9a9d83ee8e0b5efc40c46af0cbabdbee9c7946a9f4342c1e1ba4c983e182449ade54211d833e4d870123ae09565f65df0c97b81a632a8f313dc04ce6e284e85de3b0a4b07ab0869b321d4ef71863ca17d44a9a51879dc88586c779c830c9ee725c8e87a5f4153be4dad653698ec035a3792d6450f8c638109f9c048d964268f4ba5fb6defc837498b23acef520f452979ca17006ee6e9305dc17d77e585e9e3174b7cfbdca1d7373eba06cf39e568f72cafc051ba0b46de40b52cab65d3f929d74ee6b220e21d5f3cddb0a860e63a8fbf1999cf8ecbb610695976ca2b4baee13822a242d7ba7aebf05e2bde9cbe6736c686ca09f6ee51fec64b6f03e28ebd1c6026d03ed1b321e34dccbe2aed1eefcb0d61ac6f33ab0e256d36f0ba1739931541ce8fee4c8e0bd7694f6a7a92e0767ad5aa6a6517e57d17304ee51d1735b10c103a28ac210cc9e5df14f88cb203f20b9327662c3c434ab2318fd0fca4d584f8ff4df648c309ec032131995d450760ed826d0cb193d0c8f90b060e4559d80aa1cb542dd2b0ebd8d0e9c1e5ea86e3cd927c81b3ddcdd11c9a70f1317ecdb69c5add3a30548502f5dc63f6c42bf7ae8064eccff4391edb08f2b9d8f4557548185b7bd47fa478ed2e2661c77b8b2f5bf980a89937be7fbd43fa788433ddf16de6651c817c966140ea36a4d484ee2a56d9ef13365787e9f8bdcaa01b7c326729202043a41f21b8c146bdf3170c9233403e4d8f0b9a379d3ab0360dbf4490158b2e87a772ee040e565c72f
398549437c2e2307784e7f27cb01bbf0faa4a3d63ecdb0353756d7b20e765324d38993861317d9e7d88325919ffc0e91663cac87d1a9dbbb57b565f0ce2330ebe882eedb752847eb093a8858c15cbcdbd7dd9d76579510ed2d48ee4e588a9490b45f6a3ab4025fb7b36d5c41569eefcacee805402d5799bdd86cccb88d63acfbc45d0b3050a6819af76df58ce519b4e38db9357389695a62dc53a438a628956901f1f8d3178925fee52297f20ec7e2820321044ef45eb3526a3f72776a6f6cf1e256385ed3697603f2391eb7d4a713808125c7a741449e12a90322439b12fec74a6eab617c0104cc01405d92cffaf87731fc272663c7a158c65cdcb29ba89ed03ce67a576401aeda141426ccfb466b5dc7a627fefe6a42a6319c7cfbce79c0c1d3b78d6fbdb13a19a2d7f646982d62b7dbc4c7bb72e5dddb54db7040c4bd31ea5c48ea7e75ce5ab1d51da99fa1b07a045bf3236174a77d64d4071f51397fd351d27f86cb7ebe426b93d39a6ca62afd987f0530de95c4c363593e306c0df4fbe01a57b425804e10cc343a2f91ec3886c5f9d42858899f3675795bfa252ba64cc8fc228e86e997428133a76de24435b945cae6469553c2a5da40c34ea5b09bd57ea061ca738bce857b6ea71c4f8933177b05fa4214c8991649a6f8816fe7316d53ecc3e5418f47926b12ec940ff42b3e9c5690c6ffe301b9c213fb87ec64b26114eeeaae1de5fcd22aec4144d786da50261479c59136cb5377f2dd7b3adb9fb0b9a61689cc6c9d5c760baa1b1b86ab24c27997deff01c0b6755b94f3835a040d9428a4e8fcbf56bdce95f85543ebc81d0ddfa54943315e48d2e0b65d8e0568e0b489a436a09097aef5b3542eeb53f46fcdf57655c5992ff47ac16b86a8e2f94ed121128c43f56fab9be77ed0cf681c85621dc6de85cf5b096782992afeb51d236f47e7b09d81d08b3fb001b4bb81e88124a59999e51bf98470c922f6c66cd9c66da53520fb98b37959489c04a4439f66a366a5a2f7ca258c5de83a60a9b2352fdbd9102a93ba49330db79c8567899b14fb0d22366eabeaaa6fbfb7253b388995408d4f63de8908009fe4abee95b347346436f339a37b994e3c695eb9ae47f2cb327c4dd899faadce46b011944fe2ef1f32df6dbdca110bb005792eedb72e85726f3124f85b01068db7b5b827c37e527b3f9050ca4cc360af842b71aaf0ec8a9959f359fcae45df395ce6078359d571c6a75ab1a3350bd23de0922f4ffd6414b6438583e51377d3489651b8b11ed645b12a96e9cefe28d34b007757be948c9907e2aa08445454290ddf852b39ccaf2f217f626b482445fe355f47d053ed3a543a54a32c6517a45e641843638f9cf06a714572d513b6bf5c16d297780912993f8080fb1bca2245daf1c09718cc4ce239fb28b00168b332f0bfac69b1c1b00dd664111d9ba81f06ee6cde172d996971f1cf6026d4e4b6a02ee3416232509e3188c5d4c60066c4710114bce486ad42e25e6ed12dbf61a13210f6753e32807c814f420cf6c4af4d12a476c2f20e3ee2e26c21c9fff724dc5cbbc8a1b33da58c75df6769293e6db048958a9bc56f4831ca3cb0add3b9ea327998572c3a61425ef9852b6cbde37d0fd26748498aad4bd6a889d62fda6483e7d42d49a32a443b0b23e3d9d9ce7fe58e55e5a2e0f8cd9baa5daaa6e687f44c75b39dfced96e2ba3f348fe3ab24990bd8bcf3f4e8b375bba6e2aa222fcaf9f1fe4698d82684ca0c72963f3b9358fe1809266bf8fd506747712e1fa19d46daa19a7d4e42745ac217816a06a8420f609fea84be2f70ae1d6d43998ffa09abb680c3198b5e36010ef30543375e749546cf7d6919882bdd66d7943e972c2c7a68f730e9fd5efdf744306078123a47e157521beb651eae11c3990917964a502e2d325a38bd1335791749766b78adaf1862d5ade55d5aac4ca1d358d3aa449d5c62e2e94996d2f4f97f1e8f24966f2dfaac52e4df48de073dcfb1244d18b80ad97bad8f1a826dc2777864795b1bf80754d3bf84ee61e5ff5c5107b95d9e22692a99cba9ee0469cd52a3bd8834f6049a3f83a29155edfae2f4193c8fb0fb373e5cc7cf16abfad48717037320ba98eda8b3b00d69d28f2e29de35e9e2b74769d27d27fd40dc716c6f550ffcd40b35014ba68b2ff5c48a2ec7a0938ec6b08c850bae5885e4e8b91e7641010aa898d3e39e3511ce25217ea3a29a90aae370b57dd14aa8fd8fd980ec1fa4e77870d3be172747448b3c0bdbbef616c3911299733aea494a0b850cab128760d374a7df7ca868ebd64d3949a59b2b621381ba683bd8d383fd1975dcc99ebcdf9c3f04e1987f0a9ec32753ea5e53b67ba6c200ed93679ced5b4d84f5d6bd1c3abd4b32749025aecf88c9c03390455fd0b55a47e67de4f7ff78af1c40b602983d01de7d99c5084ca89749143f799f2f3d9ad588c5e5a3d75329b61c29ac19344436773515dc67f521dad7a17ef4445f3462da1caa91c8c80c3
698e1961c32596e1a410cc59021375263b552267f52653cf1ba597eb21b883ff201e293c078c4aa5ad2c3569438a9a4b93141d8dda2d2ecfda7ed52bb02ae97271015bdfaa5276ca6024122ac48d6cf62b223fbd17e3c87ec58a78f745c5e01e8df088b4d5018b92d45fb1842b64a4ae93490443c1bc0a5cd170d36da85531f6c9a66c4a7db9c395518c4f2c7b06d19537a838f64f57c23b6307d9c0dc5f97396f41151485c4bfbab063b1327da49f4407738ea1385cfe0d4c763c9bee053db02e3027d47c6bf771a2cc6bf932cfd5ad42695799a634e39105ab128dd6265af8a8093ab116e98af5b2af91e34129d84a50e966408b45603f65bc54ecde64071cf6af124ebb6bcd799dc3637a2981c9bccbb86427c1b8fab76d39fabe8fa4bd01d0ca456904ca408fa7aee50824c5b0ce28357cf8da36cdde6ea83be8ed2947173d0dc3cad64b9d6601e62854063b65ea490e3a4ec24835d5689b9888b7ec6fdcda38f956741d44c84c064dd91c003e931dea393ba15f0ab47f3f6889e4e64a3d0ce1d4962b35dc2b075afe01978bb64ed91bce56749752de33988796f98fa1617e254047b8499639b27043cb3350d3efed3ffed103f720e8cbea0ae4e122493ffc683e3fc9be5a3557ba7df7127cc2a283b2ca7187228d2f8d495019766b0fa744a025e185742eb78153b4aa7e80128d6de6af1fec8ea3f715c178c86d100f9d22a036d2225ee144e29cf56737cb6c93b8f9d82694a877232e0c3b31c9a"}}, {0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7f}}, {0x28, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x22, 0x2, "f5bf8a3fdd833fd4f35aa085c2ffad03e9c1e8a3551cff5f2683f36f4815"}}]}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x10, 0x3, 0x0, 0x1, [{0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7}}]}]}, 0x10a8}, 0x1, 0x0, 0x0, 0x10}, 0x1) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r3, 0x0) accept4(r3, 0x0, 0x0, 0x0) r4 = socket$netlink(0x10, 0x3, 0x0) r5 = socket$netlink(0x10, 0x3, 0x0) r6 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r6, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r6, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r5, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r7, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r4, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r7}]}, 0x4c}}, 0x0) socket$inet6_tcp(0xa, 0x1, 0x0) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) connect$unix(r1, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r1, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) syz_genetlink_get_family_id$nl80211(&(0x7f0000000140), 0xffffffffffffffff) (async) sendmsg$NL80211_CMD_SET_COALESCE(r1, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000180)={&(0x7f00000006c0)={0x10a8, r2, 0x2, 0x70bd27, 0x25dfdbfe, {{}, {@val={0x8}, @val={0xc, 0x99, {0x7, 0x61}}}}, [@NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_DELAY={0x8, 0x1, 0x9}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8, 0x2, 0x1}, @NL80211_ATTR_COALESCE_RULE_CONDITION={0x8}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x1040, 0x3, 0x0, 0x1, [{0x1008, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x1004, 0x2, 
"6d51246f3f36b059c67d0143048f151c9e8424d413f848d08bc5a909d35b95b0afb9129a4ff0f4d90c6a1536729de8ea0537be638713635786c1e40e9d52d208b5e6a034d9125a020a4741105a8e4ea52e48e3ae5333c8c65f7d445056eadcb79016c04cd7e79ae889c1f6f4cbc81f8f42b694b86562d8e715b59807e47804687a93c87b2bd2395cf8947b150ba01ccacc2fe620850804f4ae9f86db4defb2acb0fbe64d73239734fc88992fc2e8ef82688939006d6eba8abc580349b8382935d237d5d73534059b6a5f693a7139383820766b579805f0eafeb6acec953583dff0ecfb89d39e162f916d203b0455f699da7f6d3a04ff424c8a29da3eb7f467d41ed65c3fa455b1caae71e90a703111aa176f52c0589dd728b870d8fc805bc1f045e9ad95ea1e495cf457f37f9bd035811146fc70e2c7a4ba12bdac6a2ee7c2b68be7700d645d41e7bd522b37724364a4b50ffb549bddbfd13cbac91e45c04ec51d7014f5dd08a4c005a6e00e082a9294f45deb4afb391b513f62f7d0c05da67db02448ce1ff0d76f081412527eaaaeebcb4211ad0d1dd6bf753c964100f65b3fa43e96a4cd75e5678ab298746d6b3468824abc579d1d5730edbf1323fad8115569afb7c6ed01c01e274220a0ce920a3116f578fae7d36294164bdc2aa2a43a6cec73e3317ac9702240beaab32e12f47bfca69a6c336dec12de60f1c7eafa5a772ab535bb873230aea864b2ff19e6654968ad5d51a4afa23c5841760733a08978f7a5005e7da546476d0b869f1c465082dff576ebd9aaf1a438bc4faa044ba44819efaa3998d22db4f382e8213c68ba8c9728bf3a08b8081f397926ba5bed14d2f6d97f3b8b4b36864f3efecdad5bdcec4a424528ed26d82f85aeb0b4699eda424d06f83b3bf01327bb55996a6da947abb56f88cfe182485e30ed22b0a551b363d16b76eabf6820e107b74cf1d04e9c898787ecead7953b44e4ce02ef5f4a23c57e4397943f3705e1f2e3b4dfec1a67540bfe671d29c5e9d5d4bf4a0518d3fa84c4de70a3cc21305a299261e7cabf6f079cd39468397ebe1fc1c799f8a3e1bea477e2f30d8fcba1e89b7790b3afda7cc20064b6a32a41bcec07abae6b37421b38803e849285aa4a0c9cc17ce7416bd8aaef63fc98636e9044fe1af71cd5a025416eda7e4a89f0da821258f85400fbe47ec7862aaf39da1eb7decec09f15a33dd863f4686390b982b690228396ec6d982b41489ad8d96374eff6273b7f286f4c21d6f8d72f463782cd86792d4b849e1feb884a76db02e0ab26e568bfc8513060d393267cec2d226a05b05c2495f30af56873cdbf11c87f395d9cd0d592b11e14b9fa9c7ccaf27b5640ebc5dd0b19f2de1723698e93cd5a884544eab3743c9297187954cc860e0134f2e9e712c8b0c7bec5c6d68c7ec1a3e1a697bfd461de26fe1445327761dd1ee648d15a3b0764d74a41ee333a902be7fc8fa8af9b8b8cc12fe672b82aa3a5ea60e32f284b7cfd4173312ef8386c65e73f615878eb1f8c7bd8c51ed2947662d490d002a9cd252fc91c22e80947b2b77503b9e3031fe24e5a720f34fc42b1e16988c61b91cdd9857beca586c0450c1232f6dbf8062ab73676e68fd042a6177d1faae1fa7dc757c679f22b174de3b1da38dbbb8ad523329728e638db33830fad2f949ea8cd8afc310cbcf8fc526084a53541a37ed6167f8307dcec42f2084b5dfe06899b9a9d83ee8e0b5efc40c46af0cbabdbee9c7946a9f4342c1e1ba4c983e182449ade54211d833e4d870123ae09565f65df0c97b81a632a8f313dc04ce6e284e85de3b0a4b07ab0869b321d4ef71863ca17d44a9a51879dc88586c779c830c9ee725c8e87a5f4153be4dad653698ec035a3792d6450f8c638109f9c048d964268f4ba5fb6defc837498b23acef520f452979ca17006ee6e9305dc17d77e585e9e3174b7cfbdca1d7373eba06cf39e568f72cafc051ba0b46de40b52cab65d3f929d74ee6b220e21d5f3cddb0a860e63a8fbf1999cf8ecbb610695976ca2b4baee13822a242d7ba7aebf05e2bde9cbe6736c686ca09f6ee51fec64b6f03e28ebd1c6026d03ed1b321e34dccbe2aed1eefcb0d61ac6f33ab0e256d36f0ba1739931541ce8fee4c8e0bd7694f6a7a92e0767ad5aa6a6517e57d17304ee51d1735b10c103a28ac210cc9e5df14f88cb203f20b9327662c3c434ab2318fd0fca4d584f8ff4df648c309ec032131995d450760ed826d0cb193d0c8f90b060e4559d80aa1cb542dd2b0ebd8d0e9c1e5ea86e3cd927c81b3ddcdd11c9a70f1317ecdb69c5add3a30548502f5dc63f6c42bf7ae8064eccff4391edb08f2b9d8f4557548185b7bd47fa478ed2e2661c77b8b2f5bf980a89937be7fbd43fa788433ddf16de6651c817c966140ea36a4d484ee2a56d9ef13365787e9f8bdcaa01b7c326729202043a41f21b8c146bdf3170c9233403e4d8f0b9a379d3ab0360dbf4490158b2e87a772ee040e565c72f
398549437c2e2307784e7f27cb01bbf0faa4a3d63ecdb0353756d7b20e765324d38993861317d9e7d88325919ffc0e91663cac87d1a9dbbb57b565f0ce2330ebe882eedb752847eb093a8858c15cbcdbd7dd9d76579510ed2d48ee4e588a9490b45f6a3ab4025fb7b36d5c41569eefcacee805402d5799bdd86cccb88d63acfbc45d0b3050a6819af76df58ce519b4e38db9357389695a62dc53a438a628956901f1f8d3178925fee52297f20ec7e2820321044ef45eb3526a3f72776a6f6cf1e256385ed3697603f2391eb7d4a713808125c7a741449e12a90322439b12fec74a6eab617c0104cc01405d92cffaf87731fc272663c7a158c65cdcb29ba89ed03ce67a576401aeda141426ccfb466b5dc7a627fefe6a42a6319c7cfbce79c0c1d3b78d6fbdb13a19a2d7f646982d62b7dbc4c7bb72e5dddb54db7040c4bd31ea5c48ea7e75ce5ab1d51da99fa1b07a045bf3236174a77d64d4071f51397fd351d27f86cb7ebe426b93d39a6ca62afd987f0530de95c4c363593e306c0df4fbe01a57b425804e10cc343a2f91ec3886c5f9d42858899f3675795bfa252ba64cc8fc228e86e997428133a76de24435b945cae6469553c2a5da40c34ea5b09bd57ea061ca738bce857b6ea71c4f8933177b05fa4214c8991649a6f8816fe7316d53ecc3e5418f47926b12ec940ff42b3e9c5690c6ffe301b9c213fb87ec64b26114eeeaae1de5fcd22aec4144d786da50261479c59136cb5377f2dd7b3adb9fb0b9a61689cc6c9d5c760baa1b1b86ab24c27997deff01c0b6755b94f3835a040d9428a4e8fcbf56bdce95f85543ebc81d0ddfa54943315e48d2e0b65d8e0568e0b489a436a09097aef5b3542eeb53f46fcdf57655c5992ff47ac16b86a8e2f94ed121128c43f56fab9be77ed0cf681c85621dc6de85cf5b096782992afeb51d236f47e7b09d81d08b3fb001b4bb81e88124a59999e51bf98470c922f6c66cd9c66da53520fb98b37959489c04a4439f66a366a5a2f7ca258c5de83a60a9b2352fdbd9102a93ba49330db79c8567899b14fb0d22366eabeaaa6fbfb7253b388995408d4f63de8908009fe4abee95b347346436f339a37b994e3c695eb9ae47f2cb327c4dd899faadce46b011944fe2ef1f32df6dbdca110bb005792eedb72e85726f3124f85b01068db7b5b827c37e527b3f9050ca4cc360af842b71aaf0ec8a9959f359fcae45df395ce6078359d571c6a75ab1a3350bd23de0922f4ffd6414b6438583e51377d3489651b8b11ed645b12a96e9cefe28d34b007757be948c9907e2aa08445454290ddf852b39ccaf2f217f626b482445fe355f47d053ed3a543a54a32c6517a45e641843638f9cf06a714572d513b6bf5c16d297780912993f8080fb1bca2245daf1c09718cc4ce239fb28b00168b332f0bfac69b1c1b00dd664111d9ba81f06ee6cde172d996971f1cf6026d4e4b6a02ee3416232509e3188c5d4c60066c4710114bce486ad42e25e6ed12dbf61a13210f6753e32807c814f420cf6c4af4d12a476c2f20e3ee2e26c21c9fff724dc5cbbc8a1b33da58c75df6769293e6db048958a9bc56f4831ca3cb0add3b9ea327998572c3a61425ef9852b6cbde37d0fd26748498aad4bd6a889d62fda6483e7d42d49a32a443b0b23e3d9d9ce7fe58e55e5a2e0f8cd9baa5daaa6e687f44c75b39dfced96e2ba3f348fe3ab24990bd8bcf3f4e8b375bba6e2aa222fcaf9f1fe4698d82684ca0c72963f3b9358fe1809266bf8fd506747712e1fa19d46daa19a7d4e42745ac217816a06a8420f609fea84be2f70ae1d6d43998ffa09abb680c3198b5e36010ef30543375e749546cf7d6919882bdd66d7943e972c2c7a68f730e9fd5efdf744306078123a47e157521beb651eae11c3990917964a502e2d325a38bd1335791749766b78adaf1862d5ade55d5aac4ca1d358d3aa449d5c62e2e94996d2f4f97f1e8f24966f2dfaac52e4df48de073dcfb1244d18b80ad97bad8f1a826dc2777864795b1bf80754d3bf84ee61e5ff5c5107b95d9e22692a99cba9ee0469cd52a3bd8834f6049a3f83a29155edfae2f4193c8fb0fb373e5cc7cf16abfad48717037320ba98eda8b3b00d69d28f2e29de35e9e2b74769d27d27fd40dc716c6f550ffcd40b35014ba68b2ff5c48a2ec7a0938ec6b08c850bae5885e4e8b91e7641010aa898d3e39e3511ce25217ea3a29a90aae370b57dd14aa8fd8fd980ec1fa4e77870d3be172747448b3c0bdbbef616c3911299733aea494a0b850cab128760d374a7df7ca868ebd64d3949a59b2b621381ba683bd8d383fd1975dcc99ebcdf9c3f04e1987f0a9ec32753ea5e53b67ba6c200ed93679ced5b4d84f5d6bd1c3abd4b32749025aecf88c9c03390455fd0b55a47e67de4f7ff78af1c40b602983d01de7d99c5084ca89749143f799f2f3d9ad588c5e5a3d75329b61c29ac19344436773515dc67f521dad7a17ef4445f3462da1caa91c8c80c3
698e1961c32596e1a410cc59021375263b552267f52653cf1ba597eb21b883ff201e293c078c4aa5ad2c3569438a9a4b93141d8dda2d2ecfda7ed52bb02ae97271015bdfaa5276ca6024122ac48d6cf62b223fbd17e3c87ec58a78f745c5e01e8df088b4d5018b92d45fb1842b64a4ae93490443c1bc0a5cd170d36da85531f6c9a66c4a7db9c395518c4f2c7b06d19537a838f64f57c23b6307d9c0dc5f97396f41151485c4bfbab063b1327da49f4407738ea1385cfe0d4c763c9bee053db02e3027d47c6bf771a2cc6bf932cfd5ad42695799a634e39105ab128dd6265af8a8093ab116e98af5b2af91e34129d84a50e966408b45603f65bc54ecde64071cf6af124ebb6bcd799dc3637a2981c9bccbb86427c1b8fab76d39fabe8fa4bd01d0ca456904ca408fa7aee50824c5b0ce28357cf8da36cdde6ea83be8ed2947173d0dc3cad64b9d6601e62854063b65ea490e3a4ec24835d5689b9888b7ec6fdcda38f956741d44c84c064dd91c003e931dea393ba15f0ab47f3f6889e4e64a3d0ce1d4962b35dc2b075afe01978bb64ed91bce56749752de33988796f98fa1617e254047b8499639b27043cb3350d3efed3ffed103f720e8cbea0ae4e122493ffc683e3fc9be5a3557ba7df7127cc2a283b2ca7187228d2f8d495019766b0fa744a025e185742eb78153b4aa7e80128d6de6af1fec8ea3f715c178c86d100f9d22a036d2225ee144e29cf56737cb6c93b8f9d82694a877232e0c3b31c9a"}}, {0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7f}}, {0x28, 0x0, 0x0, 0x1, @NL80211_PKTPAT_PATTERN={0x22, 0x2, "f5bf8a3fdd833fd4f35aa085c2ffad03e9c1e8a3551cff5f2683f36f4815"}}]}, @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN={0x10, 0x3, 0x0, 0x1, [{0xc, 0x0, 0x0, 0x1, @NL80211_PKTPAT_OFFSET={0x8, 0x3, 0x7}}]}]}, 0x10a8}, 0x1, 0x0, 0x0, 0x10}, 0x1) (async) socket$inet6_tcp(0xa, 0x1, 0x0) (async) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r3, 0x0) (async) accept4(r3, 0x0, 0x0, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r6, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r6, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) sendmsg$nl_route(r5, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r7, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r4, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r7}]}, 0x4c}}, 0x0) (async) [ 1963.696505][T27753] bond958: entered promiscuous mode [ 1963.709303][T27753] 8021q: adding VLAN 0 to HW filter on device bond958 01:55:13 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6203, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, 
@IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1963.768920][T27754] bond958: (slave bridge916): making interface the new active one [ 1963.778093][T27754] bridge916: entered promiscuous mode [ 1963.790533][T27754] bond958: (slave bridge916): Enslaving as an active interface with an up link [ 1963.802905][T27780] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 01:55:13 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x25, 0x8000e, 0x891) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1963.888754][T27780] bond525: entered promiscuous mode [ 1963.896908][T27780] 8021q: adding VLAN 0 to HW filter on device bond525 01:55:13 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) [ 1963.999439][T27764] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 1964.066263][T27764] bond905: entered promiscuous mode 01:55:13 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r2 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r2, r1, 0x0, 0x100000002) (async) ioctl$SIOCGSKNS(r1, 0x894c, &(0x7f0000000000)={'tunl0\x00', 0x600}) (async) r3 = socket$inet_smc(0x2b, 0x1, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r3, 0x81f8943c, &(0x7f0000000100)={0x0, ""/256, 0x0, 0x0}) (async, rerun: 32) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93
f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47
c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) (async, rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e39901
49a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r9}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r11, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r10}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}, {}, {}, {}, {}, {}, {}, {0x0, r7}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r8}], 0x81, "7464fbe08eb369"}) r12 = socket$netlink(0x10, 0x3, 0xf) ioctl$sock_SIOCSIFVLAN_GET_VLAN_VID_CMD(r12, 0x8983, &(0x7f0000000040)) (async, rerun: 32) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r12, 0x8982, 0x0) (async, rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r12, 0x81f8943c, &(0x7f0000002c40)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, r13, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6
380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) (async, rerun: 32) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) (async, rerun: 32) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67a
b86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e6
62180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r17}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r15}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r19, r18}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r18}, {}, {0x0, r14}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}, {}, {}, {}, {}, {}, {0x0, r14}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r18}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}, {}, {}, {}, {}, {}, {}, {0x0, r15}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r16}], 0x81, "7464fbe08eb369"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(r3, 0xd000943d, &(0x7f0000043f80)={0xffff, [{r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, 
{r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4}, {r5}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4}, {r4, r6}, {r4}, {r5}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {0x0, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4}, {0x0, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r5}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r4, r6}, {r5}, {r5}, {r4, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4}, {r5}, {r4, r6}, {0x0, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r4}, {r5, r6}, {r4}, {r4}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4}, {r5, r6}, {r4, r6}, {r4, r6}, {r4, r6}, {r5, r6}, {r5, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r5}, {r5, r6}, {r4}, {r4, r6}, {r5, r6}, {r4, r6}, {r4, r6}, {r4}, {r4, r6}, {r4, r6}, {r5, r6}], 0xa4, "c71a4b87d907f8"}) (async) r21 = socket$netlink(0x10, 0x3, 0xf) ioctl$sock_SIOCSIFVLAN_GET_VLAN_VID_CMD(r21, 0x8983, &(0x7f0000000040)) ioctl$sock_SIOCGIFVLAN_DEL_VLAN_CMD(r21, 0x8982, 0x0) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(r21, 0x81f8943c, &(0x7f0000002c40)={0x0, ""/256, 0x0, 0x0}) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, r22, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e1752cc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ed5ff2e013d5d412046", 
"e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966309f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f0
17a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd27940415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6
380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b00"}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_INFO(0xffffffffffffffff, 0x81f8943c, &(0x7f0000000540)={0x0, ""/256, 0x0, 0x0}) (async) ioctl$BTRFS_IOC_INO_LOOKUP_USER(0xffffffffffffffff, 0xd000943e, &(0x7f0000001800)={0x0, 0x0, "02b66c11beed7c64705a2327ec95767e4393a580b2c3043a2fcfb08839b8897467f4a525091f0f7d2480b2fbdcfd2a3924b674e8aed38628fb035a463934e151ef7c0289d4fd7b308135026f77657ca78a849330727be579703bef5f51cf16a7198f8eb8e962cc55e47a88645dc99b6e4dfd15399b64e979124ef3a9120208d05d9de3ff1ce9cc9e6353b97b13c914e3530a6ec5b967674f3cbc69538c66356f6777af618dd96e1730048727e164fcc8139776c1e5061154ffe7838008ce90ccbf0827c03a28016d5f3bdabbc98bee9c405509e3e094ba1677d6b347061c346722828810ba1b68424c585770f6527f3856630aedb97f2ee0742e013d5d412046", "e26481ed1e7c639b5947fa03672a9556f2d9c88f35f8f8b62d6b01c1aef3d08f4ee43881217f959db47d280e8448925694f755ec0256840e58a31c14f0d78d223c58da8e0bd812fb893403e655823624c9e0581484207a6d914ad9befaa148971274f98aa0b753b38761ffcd20135aa09bee95ffc38cfb410de6eb0b1c0eaf69af8375bb982d21281acaa2966378f31ed037b8789d3bf55cda6f1fdefac7c7d4eec101525b850f72c5d3515de41ea6c4cc0a1d4c9d0e83fe98d8baa6325482d6270833cf890aba6768abf6a6ac45c0268bac824f692a521bf8500ce437d7bd4ecafbf918c063d8af3d110e24ddc569f535794d4c8c4ab3897b27310c8d39efca731b7a22caf0ec5f2df04ce6b496582e72b5cbb10480d59c191cd3eca3d3a973fe653216cd08c8de4098133f85da499af2f6a6c7513755f40d13810388f5476a67bb722dc832e4af4c76fda32aa9699c8191a644df90df4b2b2e7993e90bee48e3b65cbc84de44a15926c157018e46c849d57933b96f67dcec40eb733515b880aabbfd1324433ef61e0a112430d3829717820a9eb79c8767614facf386e07a7df2ceb2e9f9d33d65d33fecc7697f5bf74769a67ac297756ec495eaf87674ad5fae2fd772301896e85c617328f32f69bb718bf1ade1d8fd637a6bb4c7044929bf43757821b2bde4ce2ec164ef3841458207326249547ccd2c3ca3467e8c5474cb820ee8647c90d2da6af054fa1f25afb1b0792dcf21b2736c67fc8119a6bdbe19689bb639113f5b9165a50b6f2df8dd8c549220488da3bb10e45dcc7a6207635a550e5ee913aa8ec6ea5f92ba59eb003424e6ea472df633220c8e9644d5bf2f4b01027fc5c0422c39932531e1be7e62e021ac4592b57d95720d5ae86f4bd11d95ad82569ab24e0d1b5a144e6fbcc76c4d7800a70069a852979a3a1f02fcbef6bfeff2101576c998109c65350175bd05435d4be236097340cfafadfde768b1176e6c40f34731164afcd0c3620a0cd015494e432e9aff2c59a4cf476d9037a398183fe74789da0300172e1c02173ff867faf6fbed165ca819be3e4ea05fb6ff1633430984bece64ef93ff8d012b9c321941d59f5b8572008f6bb22597864ea1fbd6b9e349b6d9dc1adc185ea32d5e67e44ac5ebea2063635a53e1718a4246ea47a8874daae0ac6653f821f381a940cbfc98d4a25aea33c63309ee1c6c20f349b673509f09b5381956611359754bdb2eeda100ccb4834596de45556611d5b568f2604653c2129a7d5bd50a209a6d4a956c108b5349a8d208431a2edb17d7650332d308ea2e6f324e589f0e98de49ab655a77509637f0a65614c33f16f91e0d6305e8f9ba0766154e1599f67005ff75af7a6143e574acb799f2363dbb37971fc451c0054b5753de3e2278afe06b9e64b93ab09b4985fe26626a6266da1088f7b9755ac8a9e4e6e99a0dedfe2b1509de12f75d9dea83475543d2b328e986779f4992c870ad128b9d09e8ebcc264e8bb5b85d6d62ab902b8ed7409448cc26a777882627bf3e0c9cdd473289154560c2838c6d4fdaea97ff5d7992909c67710dfeb4977ea7bc06d77b06a19efa42178c46a3fa66ed7d6e2b67ab86be5d94721b099947563
db706c5c3a12744ea61d63fce93f546f2669c50b6568e3f32d79bfc75adefc21ee93c5c5360cf2e7ac19571c3663857baf3325b666570642da4dcb309dab05b7ad05bf832d28fa8e871f7b89d6f4327953f4dbde3aac022f4dfe050d0924427e39a8027fbc4b54c4a8c2bf35f8c11c9c0ac4bc1fc31d6bdb2cee2675c5a782aa3ab5ffcc7be7207c89cdb8546fcfdd3a5aadfee5a296abd9afc127ebbf580649e9132b55d9f40a3778af49248b593e00c9f4812b8f7adb7323ab7852e4ed09c55aa356b2e8fee6eb14be3659fdf821d23ac556845f70670d8b6a417c29ac0b1585ea865634c0c921e9d930d4018f66d1e024179d80a7154c491c8662d427ab44a3633480782889f1d00c1d182ede30d4127d769291ef408b574a41c29153b7d949d4648b60a4faf32b380dbc87146c36479d47023876abd4ae7e289ba79d988f6c3a86a75bdc784bea2f0b7e2f77cb1282f54a18e1117e50ffa46ac208fcf8a7b5751e83b3e80cb428e4c2ae63ca3cf7b2c4353303edfe328626391f7925f1ab4ef1c7d0f54d0e45590188ecdb6d2c9c0db9b0552dc81b21dc1bbe9e94be5f78dad11f53d6fc3602e9ffb872d3557c44a133ee94e50d57d5eae2214569975982c63f22750db6f5c979ed81b3f68fe6be6daa85bcd98f25548c1d4db1f15394bc708933e0352e4059bedbf832abaf75e9bdca6fea93600357ca4153357d2e0661a308edf82c0f53e7a121c7e1e8bafdc5fdb61c21f4716d06b43a8395aa915a34c4dc4b5b15ba70a4e163728a9966515682b9ca3c2499aeb17a3c17f905fefed806b504f77c52564006273282db5e0c70565ee1016fc7c241830bef951294db9682ef41742b6550246e539143f15d6c2f017a8083ec97eb3a2de8bc2d8d5fec2b9b88996b3ff6c5aa5bed326f72fe9190de74a83e380b9fe89324421697d124d9a1bd3ced8e1856923147958582d737a36da6af4a0fd92b83f0ef0cb1a725d3a5ecb3a39ad039d200989a281a0686336457824ef582698222b7a063475b793a21745d6701940a0baf124461ad71de8dff6a6f7ed676d107e01ae6b7b79aa1f96efce91039dead977bf70365de8ebb3ec06634246d62f78287831984accef27048794468f520db5c71b4fa94818ddc7394fa1b609adb8cc80c0f32efac26a47bd74119100f0cfdcb889aab1019786cc0eefc1e4295ae919e9f6c8c8a16aa76a2bfe39adf5929e9b9925da0241e734378fe140981e3536bf0b77eb0263297a936a5f37605f128d79b16723b953331f99633b8d29ad1d1dbdb74188488ea0d1b7e0ddb652c040aa0477a079e92618e52f3b7ceebd62e0f0c6946469c19ae828b7eed288c3a53320c9d5468c39d608bd42d967a21e6b788de7c6826d1b99130083182562f63443290aeeb24308d3eb4881547db34b284d9af2bd7cac0d2f66bd14758c079c345d2bce3e1efc3599b0d06e69a92db7e05473673726e1848df75e83df50f98a9321468c10c6514dd7b3cb5b0ddf2fef13284463fe88ef2bc95d51288e41e3e0ba5c91bd686d7f7658b5bac8e3991a0f3b36f004585d9edd09d478ede73e7da067ea502894fc1247e62c1a84c9065ffccc3da96f07ddce135ceafdd784dda6f64a7add400d21ae13abf98e90fb96fcda23a8ad79905428a349b2230c19cf8cdca1724382bd19b4b075438098bd46cbc668bcbdcf1da85f733a50669f976a7106ff1936f20d799e7d01b0beeba7057a90016fb2d36bb3d14e11ac077ffb91f139d16ae5e78d84559312c9fd1a91ff70e5d9b4fd279420f7647151fe951b705082230ad20415d2f605665cd9374a50f7fc3c32efe30e4c0fc84f0c0e0bcd35e46665e4f29371c1c96324f65a94c85874e8e3baba68b88acc85c38f466d7353b5a00953c8ff5522cf0903e646301e3539d047286d10be16d9fbef02d450b7b12b1ead250f68c4c893fbc6f48765f34a81c477966278a1c6945d14d6531f0b5e4cef4edaf3708a5787162c39b236272694b0a302465d01004fa9d516414c040c27fccbf38466023b06293bd07f31fcdbb3b5378a8b2c7886e1551b1caeab96f7f3c159075756f5f8aaa05b6f87f581d802903c36d84169f87c01a77c43284adf66daa38fe8633a8f6d258231466aba89bb9e56ef289d2815cde433e381ddf74852302684b974c80d0a1d7d578039120a2db36213d167687e9390ed8f14c709d3149d2f5cfbc42a9b85e6d10c4ee77270534fdff2ccb816dedc6377257a73ba2a4ea82c0cc4a81bfb939f710b109beb279edeeda345867c0130ff6fc0fbbbbd25d16d2e73f6ab2fbb4c72abc0713ef9e4690b72208c468fa64c21f2247867b5edd514e5be23733ef2136b10e03161fe3b4e6018933df1f97a8250747e6140780a064e0c35bfafcb8177a75e8fde25e61e1c6332bbf7ddc3a11910feb3bd66707a7c1a9f87b320298acec88135a177b2f6f3c0ff02765f34c30b078b58470cd227ce4c0a1a77e662180b28fb360c3fbf47ba
8982510979b86332b6d8a53d5f3947665c119a71e5b6ddc64228b47c3e23c30ebbede354d71d2780456de3f717a384112ccc9805dfe107fc9440ab7abccd8463150ce1306778fbea9793d88c4a7864a925bac5da593d6b72ee2c743f0b732a10285d293359c8126004f06fa12c0b3ca9c1e9c0a75b587207965c49f7450cb4210da01e5604f83f849b7eb9cb3f73de4bcff064b4fe08580971184db940bf29d6b8cea027dbfe0b78d6fa574da4b5b4db57b4e1939213c9848537baaf8076b1db4e767a467ec6a47c67aa33df96d7113cc9884124bca5579ce0058618b1b1f13ff3b102ff54e6bde7e29fd555acd6ffe6429af27301b6bbda38403d8f6f3266d7724f517778e7b2c1e13e3d83104ce743857c07b70cb5be8d6d8757d63a3bedfc9de79cc7e96c9833e15fd65f1336178568c2453a49cfc8c8dcb4f5d978f0a6144d62a5da6fd75d08a41084d4c59b345e07a5f4446af6e5216cc8d94347d333030015e262036f0a4fa6d2ae523654c55b37ac179efb66d230de5c70a9b33738ef0cd4ba2710d9ec03f426701101182051516a9be380a07e2555a8cda03eafc72d2bc2bc1dcadde4bb819692c1736b0ed203c4934842d791aae9e10bf239cc5393c9faf967109444c8f44532766ca5481f0ac16d25753a121727271c71d97b401dafe91588b362f2798f047deece9f860624b2d5753e46f929f8c2d03753e7245ffed6d8e36c7b380c4fb6a27e087a38b5e4a80f0043f95e5a20701c62692e684a764074e47badcecf8b2145be47b5b7089c249abcf0743a61b517004d120929d7846a39a46e0ddbcf5334fc01aca0bff31e67da8b3c88e38504db1dc3940c55bee158ae6dfcce289cf91106397d8e3990149a86c819e0354d785a4eed76fa6380491b01efbc23e7189ec253884d384865bca5da9a0917d68144a0a02cde867c365d339a025b7c8a16b82e341719a259ede8f09c165a354fd3e8f5d59e349e7c36302cf8ed115537969b598337fe7575157c89a254c0829cdb243d3d788321c756bf2817721db4bead96e1f25be5b8c7100d149d13900b6c6491ddbdbeaef7753ed5c5d9b07449bfd023501075ec08c37c13df696bf73500bd440a6522f5b955862c5eee8dc6e875c5055350b3397a6b31d2b764308ad24aafd4113af76f38f4aaba9e24efa3ad5b1c008a6cad2411ef6c7276dd3a5a2ae8130f91c36c34137731426fdf5272ef4ac5c4415e2d0f7b50da3ae910ba22bb5b962351e841746b"}) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f00000745c0)={0x5af, [{}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r26}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r24}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r28, r27}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r27}, {}, {0x0, r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}, {}, {}, {}, {}, {}, {0x0, r23}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {0x0, r27}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}, {}, {}, {}, {}, {}, {}, {0x0, r24}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {r25}], 0x81, "7464fbe08eb369"}) (async) ioctl$BTRFS_IOC_GET_SUBVOL_ROOTREF(0xffffffffffffffff, 0xd000943d, &(0x7f0000044f80)={0xffffffff, [{}, {0x0, r6}, {0x0, r6}, {}, {}, {0x0, r6}, {0x0, r6}, {}, {r5}, {r5}, {r4, r6}, {r5}, {r4, r6}, {r5}, {r4}, {}, {r5}, {}, {r4}, {r5}, {r4, r6}, {0x0, r6}, {}, {r4, r6}, {r5}, {r5}, {r5}, {r5, r6}, {0x0, r6}, {r5, r6}, {}, {}, {r4}, {0x0, r6}, {0x0, r6}, {r5}, {r5, r6}, {r4, r6}, {r4, r6}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {r5, r6}, {}, {0x0, r6}, {r4, r6}, {}, {r5}, {r5, r6}, {r5, r6}, {r5}, {0x0, r6}, {}, {}, {r5}, {0x0, r6}, {r4}, {}, {}, 
{r5}, {}, {r4, r6}, {}, {0x0, r6}, {}, {r5}, {r4, r6}, {r4}, {}, {r5, r6}, {r4}, {r5}, {r5}, {r4}, {}, {}, {0x0, r6}, {r5}, {r5}, {}, {}, {}, {r4}, {}, {r4}, {r5, r6}, {}, {r5}, {r5, r6}, {r5, r6}, {}, {r5, r6}, {r4}, {r5, r6}, {r4}, {}, {r4, r6}, {r5}, {r4, r6}, {r5}, {r5}, {0x0, r6}, {r4}, {r5}, {}, {r4}, {r5}, {r5}, {r4}, {r4}, {r5, r6}, {r5}, {r4}, {r4, r6}, {r4}, {}, {}, {r4}, {0x0, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r5, r6}, {r5}, {}, {0x0, r6}, {r4, r6}, {0x0, r6}, {}, {r4}, {}, {r4, r6}, {0x0, r6}, {r4}, {r4, r6}, {0x0, r6}, {r4, r6}, {r4}, {0x0, r6}, {}, {}, {}, {0x0, r6}, {0x0, r6}, {r4, r6}, {0x0, r6}, {r5, r6}, {r5}, {r4}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5}, {}, {r5}, {0x0, r6}, {}, {}, {r5}, {r4, r6}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {0x0, r6}, {r5}, {}, {}, {0x0, r6}, {}, {r4, r6}, {0x0, r6}, {0x0, r6}, {}, {}, {}, {r4, r6}, {}, {r4}, {}, {r5}, {r4, r6}, {r4, r6}, {}, {r4}, {r5}, {}, {}, {0x0, r6}, {}, {r4}, {0x0, r6}, {r4}, {r4}, {}, {0x0, r6}, {}, {0x0, r6}, {}, {r5, r6}, {0x0, r6}, {r4, r6}, {}, {r4}, {0x0, r6}, {}, {r5, r6}, {r5, r6}, {0x0, r6}, {0x0, r6}, {r5}, {r4, r6}, {0x0, r6}, {}, {r4, r6}, {r5, r6}, {r5}, {}, {r5, r6}, {r5}, {r5, r6}, {r4}, {0x0, r6}, {r4, r6}, {}, {0x0, r6}, {}, {0x0, r6}, {0x0, r6}, {}, {0x0, r6}, {r4, r6}, {}, {r5}, {}, {}, {}, {}, {0x0, r6}, {r5}, {r5}, {r5, r6}, {r4}, {r4}, {r4, r6}, {r5}, {0x0, r6}, {r5, r6}, {}, {r4, r6}, {r9}, {r16, r20}, {r4, r6}, {r25, r6}], 0x7f, "432ed70badba95"}) (async) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f66696c832f0a"], 0xb) [ 1964.088606][T27764] 8021q: adding VLAN 0 to HW filter on device bond905 01:55:13 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r0, 0x89e2, &(0x7f0000000000)={r0}) r2 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000001240)={&(0x7f0000001180)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f0000001200)={&(0x7f00000011c0)=@ipv6_getmulticast={0x14, 0x3a, 0x4, 0x70bd25, 0x25dfdbfc, {}, ["", "", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x40041}, 0x40000) ioctl$BTRFS_IOC_START_SYNC(r0, 0x80089418, &(0x7f0000000040)=0x0) ioctl$BTRFS_IOC_RM_DEV_V2(r1, 0x5000943a, &(0x7f0000000180)={{r2}, r3, 0x4, @inherit={0x80, &(0x7f0000000100)={0x1, 0x7, 0x3f, 0x80000000, {0x0, 0x1000, 0x7, 0x7, 0x3000000000000}, [0x2, 0x9955, 0x2611c3fb, 0x80000000, 0x4, 0x8000000000000000, 0x5834]}}, @subvolid=0xffffffffffffffe1}) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) 01:55:13 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r0, 0x89e2, &(0x7f0000000000)={r0}) (async) r2 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000001240)={&(0x7f0000001180)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f0000001200)={&(0x7f00000011c0)=@ipv6_getmulticast={0x14, 0x3a, 0x4, 0x70bd25, 0x25dfdbfc, {}, ["", "", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x40041}, 0x40000) ioctl$BTRFS_IOC_START_SYNC(r0, 0x80089418, &(0x7f0000000040)=0x0) ioctl$BTRFS_IOC_RM_DEV_V2(r1, 0x5000943a, &(0x7f0000000180)={{r2}, r3, 0x4, @inherit={0x80, &(0x7f0000000100)={0x1, 0x7, 0x3f, 0x80000000, {0x0, 0x1000, 0x7, 0x7, 0x3000000000000}, [0x2, 0x9955, 0x2611c3fb, 0x80000000, 0x4, 0x8000000000000000, 0x5834]}}, @subvolid=0xffffffffffffffe1}) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) [ 1964.280200][T27765] bond905: (slave bridge872): making interface the new active one [ 1964.303406][T27765] bridge872: entered promiscuous mode [ 1964.316736][T27765] bond905: (slave bridge872): Enslaving as an active interface with an up link 01:55:14 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r0, 0x89e2, &(0x7f0000000000)={r0}) socket$nl_route(0x10, 0x3, 0x0) (async) r2 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r2, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000001240)={&(0x7f0000001180)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f0000001200)={&(0x7f00000011c0)=@ipv6_getmulticast={0x14, 0x3a, 0x4, 0x70bd25, 0x25dfdbfc, {}, ["", "", "", "", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x40041}, 0x40000) ioctl$BTRFS_IOC_START_SYNC(r0, 0x80089418, &(0x7f0000000040)=0x0) ioctl$BTRFS_IOC_RM_DEV_V2(r1, 0x5000943a, &(0x7f0000000180)={{r2}, r3, 0x4, @inherit={0x80, &(0x7f0000000100)={0x1, 0x7, 0x3f, 0x80000000, {0x0, 0x1000, 0x7, 0x7, 
0x3000000000000}, [0x2, 0x9955, 0x2611c3fb, 0x80000000, 0x4, 0x8000000000000000, 0x5834]}}, @subvolid=0xffffffffffffffe1}) (async) ioctl$BTRFS_IOC_RM_DEV_V2(r1, 0x5000943a, &(0x7f0000000180)={{r2}, r3, 0x4, @inherit={0x80, &(0x7f0000000100)={0x1, 0x7, 0x3f, 0x80000000, {0x0, 0x1000, 0x7, 0x7, 0x3000000000000}, [0x2, 0x9955, 0x2611c3fb, 0x80000000, 0x4, 0x8000000000000000, 0x5834]}}, @subvolid=0xffffffffffffffe1}) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) 01:55:14 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1e, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1964.368902][T27777] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 01:55:14 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) sendmsg$AUDIT_GET_FEATURE(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)={0x10, 0x3fb, 0x8, 0x70bd2a, 0x25dfdbfe, "", ["", ""]}, 0x10}, 0x1, 0x0, 0x0, 0x20004095}, 0x4000) [ 1964.485925][T27777] bond1011: entered promiscuous mode [ 1964.495009][T27777] 8021q: adding VLAN 0 to HW filter on device bond1011 01:55:14 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) (async) sendmsg$AUDIT_GET_FEATURE(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)={0x10, 0x3fb, 0x8, 0x70bd2a, 0x25dfdbfe, "", ["", ""]}, 0x10}, 0x1, 0x0, 0x0, 0x20004095}, 0x4000) 01:55:14 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3f6, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:14 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) sendmsg$AUDIT_GET_FEATURE(r0, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x1}, 0xc, &(0x7f0000000100)={&(0x7f0000000040)={0x10, 0x3fb, 0x8, 0x70bd2a, 0x25dfdbfe, "", ["", ""]}, 0x10}, 0x1, 0x0, 0x0, 0x20004095}, 0x4000) [ 1964.562620][T27779] bond1011: (slave bridge976): making interface the new active one [ 1964.570852][T27779] bridge976: entered promiscuous mode [ 1964.583352][T27779] bond1011: (slave bridge976): Enslaving as an active interface with an up link [ 1964.594844][T27783] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 1964.719489][T27783] bond959: entered promiscuous mode [ 1964.742125][T27783] 8021q: adding VLAN 0 to HW filter on device bond959 01:55:14 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6558, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1964.834104][T27785] bond959: (slave bridge917): making interface the new active one [ 1964.852376][T27785] bridge917: entered promiscuous mode [ 1964.865675][T27785] bond959: (slave bridge917): Enslaving as an active interface with an up link [ 1964.876063][T27789] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 1964.900852][T27789] workqueue: Failed to create a rescuer kthread for wq "bond526": -EINTR [ 1964.952885][T27791] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
01:55:14 executing program 1: socket$inet6_tcp(0xa, 0x1, 0x0) (async) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x25, 0x8000e, 0x891) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) 01:55:14 executing program 5: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) connect$unix(r1, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r1, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) sendmsg$IPCTNL_MSG_TIMEOUT_GET(r1, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000000040)={&(0x7f0000000200)={0x70, 0x1, 0x8, 0x801, 0x0, 0x0, {0x1}, [@CTA_TIMEOUT_DATA={0x3c, 0x4, 0x0, 0x1, @sctp=[@CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED={0x8, 0x9, 0x1, 0x0, 0x9}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0x81}, @CTA_TIMEOUT_SCTP_SHUTDOWN_RECD={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x3}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0xffff2b15}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x2}]}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_L3PROTO={0x6, 0x2, 0x1, 0x0, 0x88f8}]}, 0x70}, 0x1, 0x0, 0x0, 0xc000814}, 0x1) 01:55:14 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, 
@IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) [ 1964.981193][T27791] workqueue: Failed to create a rescuer kthread for wq "bond356": -EINTR [ 1965.159953][T27809] bond906: entered promiscuous mode [ 1965.185280][T27809] 8021q: adding VLAN 0 to HW filter on device bond906 [ 1965.236414][T27811] bond906: (slave bridge873): making interface the new active one [ 1965.244907][T27811] bridge873: entered promiscuous mode [ 1965.257513][T27811] bond906: (slave bridge873): Enslaving as an active interface with an up link 01:55:15 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x24, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1965.340417][T27819] bond1012: entered promiscuous mode [ 1965.346547][T27819] 8021q: adding VLAN 0 to HW filter on device bond1012 01:55:15 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x500, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1965.450727][T27822] bond1012: (slave bridge977): making interface the new active one [ 1965.459299][T27822] bridge977: entered promiscuous mode [ 1965.471285][T27822] bond1012: (slave bridge977): Enslaving as an active interface with an up link [ 1965.599428][T27826] bond960: entered promiscuous mode [ 1965.605799][T27826] 8021q: adding VLAN 0 to HW filter on device bond960 [ 1965.688710][T27829] bond960: (slave bridge918): making interface the new active one [ 1965.700380][T27829] bridge918: entered promiscuous mode [ 1965.709808][T27829] bond960: (slave bridge918): Enslaving as an active interface with an up link 01:55:15 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6800, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1965.767080][T27832] bond356: entered promiscuous mode [ 1965.775700][T27832] 8021q: adding VLAN 0 to HW filter on device bond356 01:55:15 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r0, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) [ 1965.787660][T27835] workqueue: Failed to create a rescuer kthread for wq "bond526": -EINTR 01:55:15 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x25, 0x8000e, 0x891) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) 01:55:15 executing program 5: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async, rerun: 32) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) (rerun: 32) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) r1 = accept4(r0, 0x0, 0x0, 0x0) connect$unix(r1, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async, rerun: 64) sendto$inet6(r1, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) 
(async, rerun: 64) sendmsg$IPCTNL_MSG_TIMEOUT_GET(r1, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000000040)={&(0x7f0000000200)={0x70, 0x1, 0x8, 0x801, 0x0, 0x0, {0x1}, [@CTA_TIMEOUT_DATA={0x3c, 0x4, 0x0, 0x1, @sctp=[@CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED={0x8, 0x9, 0x1, 0x0, 0x9}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0x81}, @CTA_TIMEOUT_SCTP_SHUTDOWN_RECD={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x3}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0xffff2b15}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x2}]}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_L3PROTO={0x6, 0x2, 0x1, 0x0, 0x88f8}]}, 0x70}, 0x1, 0x0, 0x0, 0xc000814}, 0x1) [ 1965.865704][T27837] workqueue: Failed to create a rescuer kthread for wq "bond526": -EINTR [ 1966.046083][T27842] bond907: entered promiscuous mode [ 1966.064428][T27842] 8021q: adding VLAN 0 to HW filter on device bond907 01:55:15 executing program 5: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) connect$unix(r1, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r1, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) sendmsg$IPCTNL_MSG_TIMEOUT_GET(r1, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000000040)={&(0x7f0000000200)={0x70, 0x1, 0x8, 0x801, 0x0, 0x0, {0x1}, [@CTA_TIMEOUT_DATA={0x3c, 0x4, 0x0, 0x1, @sctp=[@CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED={0x8, 0x9, 0x1, 0x0, 0x9}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0x81}, @CTA_TIMEOUT_SCTP_SHUTDOWN_RECD={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x3}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0xffff2b15}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x2}]}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_L3PROTO={0x6, 0x2, 0x1, 0x0, 0x88f8}]}, 0x70}, 0x1, 0x0, 0x0, 0xc000814}, 0x1) (async) sendmsg$IPCTNL_MSG_TIMEOUT_GET(r1, &(0x7f0000000140)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x10000000}, 0xc, &(0x7f0000000040)={&(0x7f0000000200)={0x70, 0x1, 0x8, 0x801, 0x0, 0x0, {0x1}, [@CTA_TIMEOUT_DATA={0x3c, 0x4, 0x0, 0x1, @sctp=[@CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED={0x8, 0x9, 0x1, 0x0, 0x9}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0x81}, @CTA_TIMEOUT_SCTP_SHUTDOWN_RECD={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x3}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8, 0x8, 0x1, 0x0, 0xffff2b15}, @CTA_TIMEOUT_SCTP_HEARTBEAT_SENT={0x8}, @CTA_TIMEOUT_SCTP_COOKIE_WAIT={0x8, 0x2, 0x1, 0x0, 0x2}]}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_NAME={0x9, 0x1, 'syz0\x00'}, @CTA_TIMEOUT_L3PROTO={0x6, 0x2, 0x1, 0x0, 0x88f8}]}, 0x70}, 0x1, 0x0, 0x0, 0xc000814}, 0x1) 01:55:15 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3c, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1966.114994][T27843] bond907: (slave bridge874): making interface the new active one [ 1966.123256][T27843] bridge874: entered promiscuous mode [ 1966.133793][T27843] bond907: (slave bridge874): Enslaving as an active interface with an up link [ 1966.208138][T27846] bond1013: entered promiscuous mode [ 1966.214474][T27846] 8021q: adding VLAN 0 to HW filter on device bond1013 01:55:15 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x600, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1966.265429][T27847] bond1013: (slave bridge978): making interface the new active one [ 1966.273609][T27847] bridge978: entered promiscuous mode [ 1966.283392][T27847] bond1013: (slave bridge978): Enslaving as an active interface with an up link [ 1966.387100][T27851] bond961: entered promiscuous mode [ 1966.397130][T27851] 8021q: adding VLAN 0 to HW filter on device bond961 [ 1966.470513][T27853] bond961: (slave bridge919): making interface the new active one [ 1966.484217][T27853] bridge919: entered promiscuous mode [ 1966.495667][T27853] bond961: (slave bridge919): Enslaving as an active interface with an up link 01:55:16 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6a03, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1966.577960][T27855] bond357: entered promiscuous mode 01:55:16 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = 
socket(0x0, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1966.598112][T27855] 8021q: adding VLAN 0 to HW filter on device bond357 [ 1966.660061][T27859] workqueue: Failed to create a rescuer kthread for wq "bond526": -EINTR 01:55:16 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22, 0x1000, @rand_addr=' \x01\x00'}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r4, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) r5 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r5, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) r7 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r7, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r7, 0x0) accept4(r7, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r7, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) setsockopt$inet6_tcp_int(r7, 0x6, 0x7, &(0x7f0000000280)=0x7, 0x4) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r6], 0x24}}, 0x0) r8 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r8, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r9], 0x24}}, 0x0) sendmsg$nl_route(r4, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x400}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)=@RTM_GETMDB={0x18, 0x56, 0x100, 0x70bd2b, 0x25dfdbfb, {0x7, r9}, ["", "", "", ""]}, 0x18}, 0x1, 0x0, 0x0, 0x44}, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r10, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) socket(0x26, 0x3, 0x8) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, 
@IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r10}]}, 0x4c}}, 0x0) [ 1966.812021][T27864] workqueue: Failed to create a rescuer kthread for wq "bond526": -EINTR [ 1966.941115][T27870] bond908: entered promiscuous mode [ 1966.956046][T27870] 8021q: adding VLAN 0 to HW filter on device bond908 [ 1966.998776][T27872] bond908: (slave bridge875): making interface the new active one [ 1967.006928][T27872] bridge875: entered promiscuous mode [ 1967.026389][T27872] bond908: (slave bridge875): Enslaving as an active interface with an up link 01:55:16 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x48, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1967.073063][T27877] bond1014: entered promiscuous mode [ 1967.080002][T27877] 8021q: adding VLAN 0 to HW filter on device bond1014 01:55:16 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) sendfile(r0, r2, &(0x7f0000000000)=0x8, 0x9) [ 1967.167941][T27879] bond1014: (slave bridge979): making interface the new active one [ 1967.177690][T27879] bridge979: entered promiscuous mode [ 1967.203716][T27879] bond1014: (slave bridge979): Enslaving as an active interface with an up link 01:55:16 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x700, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1967.264544][T27882] bond962: entered promiscuous mode [ 1967.270313][T27882] 8021q: adding VLAN 0 to HW filter on device bond962 01:55:17 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 
0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6c00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1967.319790][T27883] bond962: (slave bridge920): making interface the new active one [ 1967.329734][T27883] bridge920: entered promiscuous mode [ 1967.340663][T27883] bond962: (slave bridge920): Enslaving as an active interface with an up link [ 1967.486966][T27888] bond358 (uninitialized): Released all slaves 01:55:17 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x0, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1967.627688][T27897] bond526 (uninitialized): Released all slaves [ 1967.808402][T27899] bond909: entered promiscuous mode [ 1967.820715][T27899] 8021q: adding VLAN 0 to HW filter on device bond909 [ 1967.952062][T27904] bond909: (slave bridge876): making interface the new active one [ 1967.960380][T27904] bridge876: entered promiscuous mode [ 1967.976057][T27904] bond909: (slave bridge876): Enslaving as an active interface with an up link [ 1967.994452][T27907] validate_nla: 18 callbacks suppressed 01:55:17 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22, 0x1000, @rand_addr=' \x01\x00'}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) (async) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) (async) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r4, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) (async) r5 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r5, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) r7 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r7, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r7, 0x0) accept4(r7, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r7, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) 
setsockopt$inet6_tcp_int(r7, 0x6, 0x7, &(0x7f0000000280)=0x7, 0x4) (async) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r6], 0x24}}, 0x0) r8 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r8, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r9], 0x24}}, 0x0) (async) sendmsg$nl_route(r4, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x400}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)=@RTM_GETMDB={0x18, 0x56, 0x100, 0x70bd2b, 0x25dfdbfb, {0x7, r9}, ["", "", "", ""]}, 0x18}, 0x1, 0x0, 0x0, 0x44}, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r10, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) socket(0x26, 0x3, 0x8) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r10}]}, 0x4c}}, 0x0) 01:55:17 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x4a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:17 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) (async, rerun: 64) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) (rerun: 64) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async, rerun: 64) listen(r1, 0x0) (async, rerun: 64) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) sendfile(r0, r2, &(0x7f0000000000)=0x8, 0x9) [ 1967.994469][T27907] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
01:55:17 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x900, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1968.096092][T27907] bond1015: entered promiscuous mode [ 1968.115335][T27907] 8021q: adding VLAN 0 to HW filter on device bond1015 [ 1968.134036][T27910] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:17 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22, 0x1000, @rand_addr=' \x01\x00'}, 0x1c) listen(r0, 0x0) (async, rerun: 32) accept4(r0, 0x0, 0x0, 0x0) (async, rerun: 32) r1 = socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) r4 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r4, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) (async) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r4, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) r5 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r5, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) r7 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r7, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r7, 0x0) (async) accept4(r7, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r7, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) setsockopt$inet6_tcp_int(r7, 0x6, 0x7, &(0x7f0000000280)=0x7, 0x4) (async) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r6], 0x24}}, 0x0) (async) r8 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r8, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r9], 0x24}}, 0x0) sendmsg$nl_route(r4, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x400}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)=@RTM_GETMDB={0x18, 0x56, 0x100, 0x70bd2b, 0x25dfdbfb, {0x7, r9}, ["", "", "", ""]}, 0x18}, 0x1, 0x0, 0x0, 0x44}, 0x0) (async, rerun: 32) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (rerun: 32) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r10, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 
0x0) (async) socket(0x26, 0x3, 0x8) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r10}]}, 0x4c}}, 0x0) [ 1968.261576][T27910] bond963: entered promiscuous mode [ 1968.277370][T27910] 8021q: adding VLAN 0 to HW filter on device bond963 [ 1968.381566][T27912] bond963: (slave bridge921): making interface the new active one [ 1968.414813][T27912] bridge921: entered promiscuous mode [ 1968.425356][T27912] bond963: (slave bridge921): Enslaving as an active interface with an up link [ 1968.454584][T27917] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 1968.469516][T27917] bond358 (uninitialized): Released all slaves 01:55:18 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x7400, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:18 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x0, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1968.577221][T27923] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1968.682385][T27923] bond910: entered promiscuous mode [ 1968.691399][T27923] 8021q: adding VLAN 0 to HW filter on device bond910 [ 1968.729647][T27930] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
[ 1968.774353][T27930] bond1016: entered promiscuous mode [ 1968.780109][T27930] 8021q: adding VLAN 0 to HW filter on device bond1016 [ 1968.830945][T27932] bond910: (slave bridge877): making interface the new active one [ 1968.846663][T27932] bridge877: entered promiscuous mode [ 1968.857897][T27932] bond910: (slave bridge877): Enslaving as an active interface with an up link 01:55:18 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x4c, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1968.929873][T27936] bond1016: (slave bridge980): making interface the new active one [ 1968.942716][T27936] bridge980: entered promiscuous mode [ 1968.961523][T27936] bond1016: (slave bridge980): Enslaving as an active interface with an up link 01:55:18 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r6 = bpf$OBJ_GET_MAP(0x7, &(0x7f0000000500)={&(0x7f00000004c0)='./file0\x00', 0x0, 0x8}, 0x10) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r8 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r8, r7, 0x0, 0x100000002) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r10 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r10, r9, 0x0, 0x100000002) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r11, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r11, 0x0) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r12, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r12, 0x0) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r14 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r14, r13, 0x0, 0x100000002) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000007c0)={0x11, 0x3, &(0x7f0000000280)=@raw=[@btf_id={0x18, 0x7, 0x3, 0x0, 0x5}, @generic={0x80, 0x3, 0x1, 0x200, 0x2}], &(0x7f00000002c0)='GPL\x00', 0x3, 0xca, &(0x7f00000006c0)=""/202, 0x41000, 0x2, '\x00', r5, 0x0, 0xffffffffffffffff, 0x8, &(0x7f0000000340)={0xa, 0x4}, 0x8, 0x10, &(0x7f0000000380)={0x1, 0xf, 0x4, 0x8}, 0x10, 0x0, 0x0, 
0x0, &(0x7f00000005c0)=[0xffffffffffffffff, 0xffffffffffffffff, r6, r7, r9, r11, r12, r13, 0x1]}, 0x80) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000100)={r2}) getsockopt$packet_buf(r15, 0x107, 0xd, &(0x7f0000000140)=""/170, &(0x7f0000000200)=0xaa) [ 1968.979502][T27945] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:18 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xa00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:18 executing program 5: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) (async) listen(r1, 0x0) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) sendfile(r0, r2, &(0x7f0000000000)=0x8, 0x9) (async) sendfile(r0, r2, &(0x7f0000000000)=0x8, 0x9) [ 1969.077486][T27945] bond964: entered promiscuous mode [ 1969.085365][T27945] 8021q: adding VLAN 0 to HW filter on device bond964 [ 1969.240067][T27947] bond964: (slave bridge922): making interface the new active one [ 1969.248269][T27947] bridge922: entered promiscuous mode [ 1969.267070][T27947] bond964: (slave bridge922): Enslaving as an active interface with an up link 01:55:18 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x7a00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1969.299192][T27951] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 1969.344380][T27951] bond358 (uninitialized): Released all slaves [ 1969.384673][T27955] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 01:55:19 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x0, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1969.483363][T27955] bond911: entered promiscuous mode [ 1969.499087][T27955] 8021q: adding VLAN 0 to HW filter on device bond911 [ 1969.577771][T27957] bond911: (slave bridge878): making interface the new active one [ 1969.585910][T27957] bridge878: entered promiscuous mode [ 1969.595753][T27957] bond911: (slave bridge878): Enslaving as an active interface with an up link [ 1969.605067][T27964] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
01:55:19 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x60, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1969.677740][T27964] bond1017: entered promiscuous mode [ 1969.705384][T27964] 8021q: adding VLAN 0 to HW filter on device bond1017 [ 1969.790657][T27968] bond1017: (slave bridge981): making interface the new active one [ 1969.802510][T27968] bridge981: entered promiscuous mode [ 1969.812736][T27968] bond1017: (slave bridge981): Enslaving as an active interface with an up link [ 1969.823546][T27973] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:19 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xa03, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1969.927951][T27973] bond965: entered promiscuous mode [ 1969.940102][T27973] 8021q: adding VLAN 0 to HW filter on device bond965 01:55:19 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) mmap(&(0x7f0000002000/0x4000)=nil, 0x4000, 0x1, 0x1010, r0, 0x48b79000) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) getsockname$netrom(r2, &(0x7f0000000000)={{0x3, @netrom}, [@rose, @default, @bcast, @null, @null, @null, @default, @default]}, &(0x7f0000000100)=0x48) 01:55:19 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = accept4(r0, 0x0, 0x0, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) (async) r4 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r6 = bpf$OBJ_GET_MAP(0x7, &(0x7f0000000500)={&(0x7f00000004c0)='./file0\x00', 0x0, 0x8}, 0x10) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r8 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r8, r7, 0x0, 0x100000002) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r10 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r10, r9, 0x0, 0x100000002) (async) sendfile(r10, r9, 0x0, 0x100000002) r11 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r11, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) write$binfmt_script(r11, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r11, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r12, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) write$binfmt_script(r12, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r12, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r14 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r14, r13, 0x0, 0x100000002) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000007c0)={0x11, 0x3, &(0x7f0000000280)=@raw=[@btf_id={0x18, 0x7, 0x3, 0x0, 0x5}, @generic={0x80, 0x3, 0x1, 0x200, 0x2}], &(0x7f00000002c0)='GPL\x00', 0x3, 0xca, &(0x7f00000006c0)=""/202, 0x41000, 0x2, '\x00', r5, 0x0, 0xffffffffffffffff, 0x8, &(0x7f0000000340)={0xa, 0x4}, 0x8, 0x10, &(0x7f0000000380)={0x1, 0xf, 0x4, 0x8}, 0x10, 0x0, 0x0, 0x0, &(0x7f00000005c0)=[0xffffffffffffffff, 0xffffffffffffffff, r6, r7, r9, r11, r12, r13, 0x1]}, 0x80) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, 
&(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000100)={r2}) getsockopt$packet_buf(r15, 0x107, 0xd, &(0x7f0000000140)=""/170, &(0x7f0000000200)=0xaa) (async) getsockopt$packet_buf(r15, 0x107, 0xd, &(0x7f0000000140)=""/170, &(0x7f0000000200)=0xaa) [ 1970.016526][T27975] bond965: (slave bridge923): making interface the new active one [ 1970.024880][T27975] bridge923: entered promiscuous mode [ 1970.036476][T27975] bond965: (slave bridge923): Enslaving as an active interface with an up link 01:55:19 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x7a03, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1970.127917][T27978] bond358 (uninitialized): Released all slaves 01:55:19 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) (async) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r6 = bpf$OBJ_GET_MAP(0x7, &(0x7f0000000500)={&(0x7f00000004c0)='./file0\x00', 0x0, 0x8}, 0x10) r7 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) socket$nl_generic(0x10, 0x3, 0x10) (async) r8 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r8, r7, 0x0, 0x100000002) (async) sendfile(r8, r7, 0x0, 0x100000002) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) r9 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r10 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r10, r9, 0x0, 0x100000002) r11 = openat$cgroup_ro(0xffffffffffffff9c, 
&(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r11, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) write$binfmt_script(r11, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r11, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) (async) r12 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r12, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r12, 0x0) r13 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r14 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r14, r13, 0x0, 0x100000002) bpf$BPF_PROG_RAW_TRACEPOINT_LOAD(0x5, &(0x7f00000007c0)={0x11, 0x3, &(0x7f0000000280)=@raw=[@btf_id={0x18, 0x7, 0x3, 0x0, 0x5}, @generic={0x80, 0x3, 0x1, 0x200, 0x2}], &(0x7f00000002c0)='GPL\x00', 0x3, 0xca, &(0x7f00000006c0)=""/202, 0x41000, 0x2, '\x00', r5, 0x0, 0xffffffffffffffff, 0x8, &(0x7f0000000340)={0xa, 0x4}, 0x8, 0x10, &(0x7f0000000380)={0x1, 0xf, 0x4, 0x8}, 0x10, 0x0, 0x0, 0x0, &(0x7f00000005c0)=[0xffffffffffffffff, 0xffffffffffffffff, r6, r7, r9, r11, r12, r13, 0x1]}, 0x80) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) ioctl$sock_kcm_SIOCKCMCLONE(r1, 0x89e2, &(0x7f0000000100)={r2}) getsockopt$packet_buf(r15, 0x107, 0xd, &(0x7f0000000140)=""/170, &(0x7f0000000200)=0xaa) 01:55:19 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x0, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1970.315756][T27982] bond912: entered promiscuous mode [ 1970.323044][T27982] 8021q: adding VLAN 0 to HW filter on device bond912 [ 1970.372084][T27983] bond912: (slave bridge879): making interface the new active one [ 1970.380189][T27983] bridge879: entered promiscuous mode [ 1970.390202][T27983] bond912: (slave bridge879): Enslaving as an active interface with 
an up link 01:55:20 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x68, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:20 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r4, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r4, 0x0) accept4(r4, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r4, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) connect$inet6(r4, &(0x7f0000000380)={0xa, 0x4e23, 0x3, @private2, 0x3ff}, 0x1c) sendmsg$nl_route(r1, &(0x7f0000000340)={&(0x7f0000000280)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000300)={&(0x7f00000002c0)=@getrule={0x14, 0x22, 0x2, 0x70bd29, 0x25dfdbfe, {}, ["", "", "", "", ""]}, 0x14}}, 0x1) r5 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r5, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r5, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r7 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NFULNL_MSG_CONFIG(r7, &(0x7f0000000200)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000140)={0x4c, 0x1, 0x4, 0x201, 0x0, 0x0, {0x1, 0x0, 0x7}, [@NFULA_CFG_MODE={0xa, 0x2, {0xe2a8, 0x1}}, @NFULA_CFG_MODE={0xa, 0x2, {0xfff, 0x2}}, @NFULA_CFG_FLAGS={0x6, 0x6, 0x1, 0x0, 0x5}, @NFULA_CFG_TIMEOUT={0x8, 0x4, 0x1, 0x0, 0x44}, @NFULA_CFG_TIMEOUT={0x8, 0x4, 0x1, 0x0, 0x1000}, @NFULA_CFG_QTHRESH={0x8, 0x5, 0x1, 0x0, 0x9}]}, 0x4c}, 0x1, 0x0, 0x0, 0x40}, 0x4048800) r8 = bpf$ITER_CREATE(0x21, &(0x7f0000000480), 0x8) sendmsg$IPSET_CMD_CREATE(r8, &(0x7f0000000640)={&(0x7f00000004c0)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f00000005c0)={&(0x7f0000000500)={0x40, 0x2, 0x6, 0x201, 0x0, 0x0, {0x5, 0x0, 0x8}, [@IPSET_ATTR_TYPENAME={0x11, 0x3, 'hash:ip,mark\x00'}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz2\x00'}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz0\x00'}]}, 0x40}, 0x1, 0x0, 0x0, 0x40}, 0x800) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r6, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x4c}}, 0x0) [ 1970.540489][T27986] bond1018: entered promiscuous mode [ 
1970.586147][T27986] 8021q: adding VLAN 0 to HW filter on device bond1018 [ 1970.704813][T27987] bond1018: (slave bridge982): making interface the new active one [ 1970.713415][T27987] bridge982: entered promiscuous mode [ 1970.725841][T27987] bond1018: (slave bridge982): Enslaving as an active interface with an up link 01:55:20 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xc00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1970.826066][T28000] bond966: entered promiscuous mode [ 1970.847055][T28000] 8021q: adding VLAN 0 to HW filter on device bond966 01:55:20 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) (async) mmap(&(0x7f0000002000/0x4000)=nil, 0x4000, 0x1, 0x1010, r0, 0x48b79000) (async) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) (async) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) getsockname$netrom(r2, &(0x7f0000000000)={{0x3, @netrom}, [@rose, @default, @bcast, @null, @null, @null, @default, @default]}, &(0x7f0000000100)=0x48) [ 1970.953910][T28006] bond966: (slave bridge924): making interface the new active one [ 1970.965068][T28006] bridge924: entered promiscuous mode [ 1970.976691][T28006] bond966: (slave bridge924): Enslaving as an active interface with an up link [ 1970.988647][T28008] bond358 (uninitialized): Released all slaves 01:55:20 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x8011, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:20 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x0, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 
0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1971.177161][T28015] bond913: entered promiscuous mode [ 1971.182974][T28015] 8021q: adding VLAN 0 to HW filter on device bond913 01:55:20 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6c, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1971.253871][T28016] bond913: (slave bridge880): making interface the new active one [ 1971.266937][T28016] bridge880: entered promiscuous mode [ 1971.276898][T28016] bond913: (slave bridge880): Enslaving as an active interface with an up link [ 1971.390705][T28023] bond526: entered promiscuous mode [ 1971.398261][T28023] 8021q: adding VLAN 0 to HW filter on device bond526 [ 1971.465009][T28025] bond1019: entered promiscuous mode [ 1971.470844][T28025] 8021q: adding VLAN 0 to HW filter on device bond1019 01:55:21 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) r1 = accept4(r0, 0x0, 0x0, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r4, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r4, 0x0) (async) accept4(r4, 0x0, 0x0, 0x0) (async) ioctl$sock_inet_SIOCSIFFLAGS(r4, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) connect$inet6(r4, &(0x7f0000000380)={0xa, 0x4e23, 0x3, @private2, 0x3ff}, 0x1c) sendmsg$nl_route(r1, &(0x7f0000000340)={&(0x7f0000000280)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000300)={&(0x7f00000002c0)=@getrule={0x14, 0x22, 0x2, 0x70bd29, 0x25dfdbfe, {}, ["", "", "", "", ""]}, 0x14}}, 0x1) (async) r5 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r5, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r5, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) r7 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NFULNL_MSG_CONFIG(r7, &(0x7f0000000200)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000000}, 0xc, 
&(0x7f00000001c0)={&(0x7f0000000140)={0x4c, 0x1, 0x4, 0x201, 0x0, 0x0, {0x1, 0x0, 0x7}, [@NFULA_CFG_MODE={0xa, 0x2, {0xe2a8, 0x1}}, @NFULA_CFG_MODE={0xa, 0x2, {0xfff, 0x2}}, @NFULA_CFG_FLAGS={0x6, 0x6, 0x1, 0x0, 0x5}, @NFULA_CFG_TIMEOUT={0x8, 0x4, 0x1, 0x0, 0x44}, @NFULA_CFG_TIMEOUT={0x8, 0x4, 0x1, 0x0, 0x1000}, @NFULA_CFG_QTHRESH={0x8, 0x5, 0x1, 0x0, 0x9}]}, 0x4c}, 0x1, 0x0, 0x0, 0x40}, 0x4048800) (async) r8 = bpf$ITER_CREATE(0x21, &(0x7f0000000480), 0x8) sendmsg$IPSET_CMD_CREATE(r8, &(0x7f0000000640)={&(0x7f00000004c0)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f00000005c0)={&(0x7f0000000500)={0x40, 0x2, 0x6, 0x201, 0x0, 0x0, {0x5, 0x0, 0x8}, [@IPSET_ATTR_TYPENAME={0x11, 0x3, 'hash:ip,mark\x00'}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz2\x00'}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz0\x00'}]}, 0x40}, 0x1, 0x0, 0x0, 0x40}, 0x800) (async) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r6, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x4c}}, 0x0) [ 1971.527447][T28026] bond526: (slave bridge468): making interface the new active one [ 1971.536637][T28026] bridge468: entered promiscuous mode [ 1971.549069][T28026] bond526: (slave bridge468): Enslaving as an active interface with an up link [ 1971.627979][T28027] bond1019: (slave bridge983): making interface the new active one [ 1971.636749][T28027] bridge983: entered promiscuous mode [ 1971.661745][T28027] bond1019: (slave bridge983): Enslaving as an active interface with an up link 01:55:21 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xe00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:21 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) (async) r1 = accept4(r0, 0x0, 0x0, 0x0) (async, rerun: 64) r2 = socket$netlink(0x10, 0x3, 0x0) (rerun: 64) r3 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 32) r4 = socket$inet6_tcp(0xa, 0x1, 0x0) (rerun: 32) bind$inet6(r4, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r4, 0x0) (async) accept4(r4, 0x0, 0x0, 0x0) (async) ioctl$sock_inet_SIOCSIFFLAGS(r4, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) connect$inet6(r4, &(0x7f0000000380)={0xa, 0x4e23, 0x3, @private2, 0x3ff}, 0x1c) (async) sendmsg$nl_route(r1, &(0x7f0000000340)={&(0x7f0000000280)={0x10, 
0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000300)={&(0x7f00000002c0)=@getrule={0x14, 0x22, 0x2, 0x70bd29, 0x25dfdbfe, {}, ["", "", "", "", ""]}, 0x14}}, 0x1) (async) r5 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r5, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r5, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) r7 = socket$nl_netfilter(0x10, 0x3, 0xc) sendmsg$NFULNL_MSG_CONFIG(r7, &(0x7f0000000200)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000000}, 0xc, &(0x7f00000001c0)={&(0x7f0000000140)={0x4c, 0x1, 0x4, 0x201, 0x0, 0x0, {0x1, 0x0, 0x7}, [@NFULA_CFG_MODE={0xa, 0x2, {0xe2a8, 0x1}}, @NFULA_CFG_MODE={0xa, 0x2, {0xfff, 0x2}}, @NFULA_CFG_FLAGS={0x6, 0x6, 0x1, 0x0, 0x5}, @NFULA_CFG_TIMEOUT={0x8, 0x4, 0x1, 0x0, 0x44}, @NFULA_CFG_TIMEOUT={0x8, 0x4, 0x1, 0x0, 0x1000}, @NFULA_CFG_QTHRESH={0x8, 0x5, 0x1, 0x0, 0x9}]}, 0x4c}, 0x1, 0x0, 0x0, 0x40}, 0x4048800) r8 = bpf$ITER_CREATE(0x21, &(0x7f0000000480), 0x8) sendmsg$IPSET_CMD_CREATE(r8, &(0x7f0000000640)={&(0x7f00000004c0)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f00000005c0)={&(0x7f0000000500)={0x40, 0x2, 0x6, 0x201, 0x0, 0x0, {0x5, 0x0, 0x8}, [@IPSET_ATTR_TYPENAME={0x11, 0x3, 'hash:ip,mark\x00'}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz2\x00'}, @IPSET_ATTR_SETNAME={0x9, 0x2, 'syz0\x00'}]}, 0x40}, 0x1, 0x0, 0x0, 0x40}, 0x800) (async) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r6, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r6}]}, 0x4c}}, 0x0) [ 1971.729543][T28035] bond967: entered promiscuous mode [ 1971.738859][T28035] 8021q: adding VLAN 0 to HW filter on device bond967 [ 1971.800965][T28039] bond967: (slave bridge925): making interface the new active one [ 1971.808881][T28039] bridge925: entered promiscuous mode [ 1971.818397][T28039] bond967: (slave bridge925): Enslaving as an active interface with an up link [ 1971.846800][T28041] bond358 (uninitialized): Released all slaves 01:55:21 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x8100, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:21 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) mmap(&(0x7f0000002000/0x4000)=nil, 0x4000, 0x1, 0x1010, r0, 0x48b79000) (async) mmap(&(0x7f0000002000/0x4000)=nil, 0x4000, 0x1, 0x1010, r0, 0x48b79000) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) getsockname$netrom(r2, &(0x7f0000000000)={{0x3, @netrom}, [@rose, @default, @bcast, @null, @null, @null, @default, @default]}, &(0x7f0000000100)=0x48) (async) getsockname$netrom(r2, &(0x7f0000000000)={{0x3, @netrom}, [@rose, @default, @bcast, @null, @null, @null, @default, @default]}, &(0x7f0000000100)=0x48) 01:55:21 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(0xffffffffffffffff, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1972.039300][T28045] bond914: entered promiscuous mode 01:55:21 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r5, 0x0) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) r7 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r7, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r8], 0x24}}, 0x0) sendmsg$nl_route(r6, &(0x7f0000000180)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000140)={&(0x7f00000006c0)=@newlink={0x178, 0x10, 0x2, 0x70bd2d, 0x25dfdbfd, {0x0, 0x0, 0x0, r8, 0x20, 0x20008}, [@IFLA_AF_SPEC={0x134, 0x1a, 0x0, 0x1, [@AF_INET6={0x5c, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0xff}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x3f}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x20}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x4}, @IFLA_INET6_TOKEN={0x14, 0x7, @private0}, @IFLA_INET6_TOKEN={0x14, 0x7, @mcast2}, 
@IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x20}]}, @AF_INET6={0x28, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x8}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x8}, @IFLA_INET6_TOKEN={0x14, 0x7, @initdev={0xfe, 0x88, '\x00', 0x0, 0x0}}]}, @AF_INET6={0x38, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x6}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0xf}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x5}, @IFLA_INET6_TOKEN={0x14, 0x7, @loopback}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x5}]}, @AF_INET6={0x40, 0xa, 0x0, 0x1, [@IFLA_INET6_TOKEN={0x14, 0x7, @private0}, @IFLA_INET6_TOKEN={0x14, 0x7, @private1}, @IFLA_INET6_TOKEN={0x14, 0x7, @loopback}]}, @AF_MPLS={0x4}, @AF_INET={0x20, 0x2, 0x0, 0x1, {0x1c, 0x1, 0x0, 0x1, [{0x8, 0x1e, 0x0, 0x0, 0x8}, {0x8, 0xe, 0x0, 0x0, 0x1000}, {0x8, 0x4, 0x0, 0x0, 0x2}]}}, @AF_INET={0x10, 0x2, 0x0, 0x1, {0xc, 0x1, 0x0, 0x1, [{0x8, 0x4, 0x0, 0x0, 0x4192e4c4}]}}]}, @IFLA_LINKINFO={0x20, 0x12, 0x0, 0x1, @bond_slave={{0xf}, {0xc, 0x5, 0x0, 0x1, @IFLA_BOND_SLAVE_QUEUE_ID={0x6, 0x5, 0x7d}}}}, @IFLA_IFALIASn={0x4}]}, 0x178}, 0x1, 0x0, 0x0, 0x4000000}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1972.098308][T28045] 8021q: adding VLAN 0 to HW filter on device bond914 01:55:21 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000340)={'#! 
', './file0', [], 0xa, "c34a26efa3ef5671d2b47ac9020ba4487452028a21746fd1e9d5fe6b732acc556ad67f7cd50a4a86194cadb68634fbc7235da26343fa73fed73b209fd22e11af560f58880da9f7ecdc1e30a8899448d4255a2b434600412a"}, 0x63) pipe(&(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000040)={r0, r2, 0x22}, 0x10) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r4, r3, 0x0, 0x100000002) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r5, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r5, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r7 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r7, r6, 0x0, 0x100000002) pipe(&(0x7f0000000240)={0xffffffffffffffff}) bpf$PROG_LOAD(0x5, &(0x7f00000002c0)={0x1a, 0x4, &(0x7f0000000100)=@raw=[@map_val={0x18, 0x4, 0x2, 0x0, r2, 0x0, 0x0, 0x0, 0x400000}, @initr0={0x18, 0x0, 0x0, 0x0, 0x80000001, 0x0, 0x0, 0x0, 0x2fd}], &(0x7f0000000140)='GPL\x00', 0xfffffffd, 0x2a, &(0x7f0000000180)=""/42, 0x41000, 0x17, '\x00', 0x0, 0x14, 0xffffffffffffffff, 0x8, &(0x7f00000001c0)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000200)={0x1, 0x7, 0xf8000000, 0x9}, 0x10, 0x0, 0xffffffffffffffff, 0x0, &(0x7f0000000280)=[r1, r3, r1, r5, r6, r0, r8]}, 0x80) [ 1972.210607][T28047] bond914: (slave bridge881): making interface the new active one [ 1972.243732][T28047] bridge881: entered promiscuous mode 01:55:21 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x74, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1972.273808][T28047] bond914: (slave bridge881): Enslaving as an active interface with an up link [ 1972.353732][ T27] audit: type=1804 audit(1691718921.982:368): pid=28084 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.5" name="/root/syzkaller-testdir4114978858/syzkaller.OpzNfI/3129/cgroup.controllers" dev="sda1" ino=1972 res=1 errno=0 [ 1972.369916][T28061] bond1020: entered promiscuous mode [ 1972.387604][T28061] 8021q: adding VLAN 0 to HW filter on device bond1020 [ 1972.511587][T28067] bond1020: (slave bridge984): making interface the new active one [ 1972.519813][T28067] bridge984: entered promiscuous mode [ 1972.539030][T28067] bond1020: (slave bridge984): Enslaving as an active interface with an up link 01:55:22 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, 
&(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1001, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1972.615441][T28068] bond968: entered promiscuous mode [ 1972.621329][T28068] 8021q: adding VLAN 0 to HW filter on device bond968 [ 1972.769121][T28072] bond968: (slave bridge926): making interface the new active one [ 1972.778990][T28072] bridge926: entered promiscuous mode [ 1972.790177][T28072] bond968: (slave bridge926): Enslaving as an active interface with an up link 01:55:22 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x8a03, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1972.916401][T28076] bond358: entered promiscuous mode [ 1972.922931][T28076] 8021q: adding VLAN 0 to HW filter on device bond358 01:55:22 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(0xffffffffffffffff, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1973.020467][T28088] validate_nla: 14 callbacks suppressed [ 1973.020486][T28088] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 01:55:22 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000340)={'#! 
', './file0', [], 0xa, "c34a26efa3ef5671d2b47ac9020ba4487452028a21746fd1e9d5fe6b732acc556ad67f7cd50a4a86194cadb68634fbc7235da26343fa73fed73b209fd22e11af560f58880da9f7ecdc1e30a8899448d4255a2b434600412a"}, 0x63) (async) pipe(&(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000040)={r0, r2, 0x22}, 0x10) (async, rerun: 64) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async, rerun: 64) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r4, r3, 0x0, 0x100000002) (async) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r5, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r5, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r7 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r7, r6, 0x0, 0x100000002) pipe(&(0x7f0000000240)={0xffffffffffffffff}) bpf$PROG_LOAD(0x5, &(0x7f00000002c0)={0x1a, 0x4, &(0x7f0000000100)=@raw=[@map_val={0x18, 0x4, 0x2, 0x0, r2, 0x0, 0x0, 0x0, 0x400000}, @initr0={0x18, 0x0, 0x0, 0x0, 0x80000001, 0x0, 0x0, 0x0, 0x2fd}], &(0x7f0000000140)='GPL\x00', 0xfffffffd, 0x2a, &(0x7f0000000180)=""/42, 0x41000, 0x17, '\x00', 0x0, 0x14, 0xffffffffffffffff, 0x8, &(0x7f00000001c0)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000200)={0x1, 0x7, 0xf8000000, 0x9}, 0x10, 0x0, 0xffffffffffffffff, 0x0, &(0x7f0000000280)=[r1, r3, r1, r5, r6, r0, r8]}, 0x80) [ 1973.114394][T28088] bond527: entered promiscuous mode [ 1973.130276][T28088] 8021q: adding VLAN 0 to HW filter on device bond527 [ 1973.168208][T28090] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 01:55:22 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000340)={'#! 
', './file0', [], 0xa, "c34a26efa3ef5671d2b47ac9020ba4487452028a21746fd1e9d5fe6b732acc556ad67f7cd50a4a86194cadb68634fbc7235da26343fa73fed73b209fd22e11af560f58880da9f7ecdc1e30a8899448d4255a2b434600412a"}, 0x63) pipe(&(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000040)={r0, r2, 0x22}, 0x10) r3 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r4 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r4, r3, 0x0, 0x100000002) r5 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r5, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r5, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r7 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r7, r6, 0x0, 0x100000002) pipe(&(0x7f0000000240)={0xffffffffffffffff}) bpf$PROG_LOAD(0x5, &(0x7f00000002c0)={0x1a, 0x4, &(0x7f0000000100)=@raw=[@map_val={0x18, 0x4, 0x2, 0x0, r2, 0x0, 0x0, 0x0, 0x400000}, @initr0={0x18, 0x0, 0x0, 0x0, 0x80000001, 0x0, 0x0, 0x0, 0x2fd}], &(0x7f0000000140)='GPL\x00', 0xfffffffd, 0x2a, &(0x7f0000000180)=""/42, 0x41000, 0x17, '\x00', 0x0, 0x14, 0xffffffffffffffff, 0x8, &(0x7f00000001c0)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000200)={0x1, 0x7, 0xf8000000, 0x9}, 0x10, 0x0, 0xffffffffffffffff, 0x0, &(0x7f0000000280)=[r1, r3, r1, r5, r6, r0, r8]}, 0x80) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) write$binfmt_script(r0, &(0x7f0000000340)={'#! ', './file0', [], 0xa, "c34a26efa3ef5671d2b47ac9020ba4487452028a21746fd1e9d5fe6b732acc556ad67f7cd50a4a86194cadb68634fbc7235da26343fa73fed73b209fd22e11af560f58880da9f7ecdc1e30a8899448d4255a2b434600412a"}, 0x63) (async) pipe(&(0x7f0000000000)) (async) bpf$BPF_LINK_CREATE(0x1c, &(0x7f0000000040)={r0, r2, 0x22}, 0x10) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) sendfile(r4, r3, 0x0, 0x100000002) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) (async) write$binfmt_script(r5, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r5, 0x0) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async) socket$nl_generic(0x10, 0x3, 0x10) (async) sendfile(r7, r6, 0x0, 0x100000002) (async) pipe(&(0x7f0000000240)) (async) bpf$PROG_LOAD(0x5, &(0x7f00000002c0)={0x1a, 0x4, &(0x7f0000000100)=@raw=[@map_val={0x18, 0x4, 0x2, 0x0, r2, 0x0, 0x0, 0x0, 0x400000}, @initr0={0x18, 0x0, 0x0, 0x0, 0x80000001, 0x0, 0x0, 0x0, 0x2fd}], &(0x7f0000000140)='GPL\x00', 0xfffffffd, 0x2a, &(0x7f0000000180)=""/42, 0x41000, 0x17, '\x00', 0x0, 0x14, 0xffffffffffffffff, 0x8, &(0x7f00000001c0)={0x3, 0x1}, 0x8, 0x10, &(0x7f0000000200)={0x1, 0x7, 0xf8000000, 0x9}, 0x10, 0x0, 0xffffffffffffffff, 0x0, &(0x7f0000000280)=[r1, r3, r1, r5, r6, r0, r8]}, 0x80) (async) [ 1973.287938][T28090] bond915: entered promiscuous mode [ 1973.312108][T28090] 8021q: adding VLAN 0 to HW filter on device bond915 [ 1973.380128][ T27] audit: type=1804 audit(1691718923.022:369): pid=28115 uid=0 auid=4294967295 ses=4294967295 subj=unconfined op=invalid_pcr cause=open_writers comm="syz-executor.5" 
name="/root/syzkaller-testdir4114978858/syzkaller.OpzNfI/3131/cgroup.controllers" dev="sda1" ino=1972 res=1 errno=0 [ 1973.397580][T28093] bond915: (slave bridge882): making interface the new active one [ 1973.420764][T28093] bridge882: entered promiscuous mode 01:55:23 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000002c0)={'#! ', './file0', [], 0xa, "59515a47659caf973b9e14ae917ca92f630b683e3dbbdfb786e444d8ca1807617e75d4ea84e7740034cb58e33e85b0a90684602aee3723b82d150eefba64040d51239eafe86290d41e22bbbfce30d0e9fcb17fb9907a4343d3dc2df0ef85018c26582f9d7aaeb40b21abbd7a6dfdb2acb9f5cf5eed645c110f176bcf0a33b834a210ecf1a4b2ec8e2c8bdc80f9aae7e45bed2328cd068131fa60d0ab14fad4d4edaa71891623fb0c3c792ffed3ce"}, 0xb9) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) r2 = socket$igmp6(0xa, 0x3, 0x2) bind$inet6(r2, &(0x7f0000001540)={0xa, 0x4e24, 0x2, @ipv4={'\x00', '\xff\xff', @broadcast}, 0x200}, 0x1c) sendfile(r1, r0, 0x0, 0x5) r3 = accept4$phonet_pipe(r0, &(0x7f0000000040), &(0x7f0000000100)=0x10, 0x80800) r4 = socket$phonet(0x23, 0x2, 0x1) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r5, 0x0) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) setsockopt$MRT6_FLUSH(r6, 0x29, 0xd4, &(0x7f0000001580)=0x1, 0x4) vmsplice(r4, &(0x7f00000014c0)=[{&(0x7f00000000c0)="842f685fcee55cb4d1fbf66567a84479cf7ba9b3dd278ec368", 0x19}, {&(0x7f00000001c0)="21e10f30e01a247b27165bfac1ed93d8cf516948e8d72d39352360dfda9e78e3719bd0568b404fa58fe539d6eb0d5ee7bf0358b748674b3ffc638448e76a4f809eb1b459554f4fcfd892b63330a67c664c3f108d12f7cabffdccca7d454bfc0c47d09dd1c2d07df4991d3ebad7164637277abb45480dc79781def8368b97b0ad130cf668e23a107519b0c958ec7859e0158c778844cc7e3fd9ff63fe1553bafc181c3101af17c559303de7076c7f5f", 0xaf}, 
{&(0x7f0000000380)="325a93951aea85702edff6024a95790e7d3d6ca29e4291328ff67964122e63f31b3a99d0bba1bfbed70591a7220475ca988d855ea4158c4bd10a8371e0b54a59d4835a082b3bff1f1a0150405ce92391de3df30ac6acfe628197e24f5abc90f779d528b8e708214b81bdbc20f52bb0963771a4247e15fb3ca4d2b3bbe13357c3bc0207f1df67f7df041a14b9086cb3184a6dec2a32c9e62e5728f75b276c4ec612b7577b01ef68c18c3e28ae443e24ff50f1f247ece239303e65d5adbc5c3aa8c9f8647e920e924012c4b39780a08982c121e1f7a50336958d8ab234c613b87c4b3423c2f296595d899371f3dad756ea76fc70b9ccb30a32e3a284c6fcc18078471f39d3cff36f3e2ca16b718cb0e45c211abc236a380c4130fc0fc6eb4746f9c70eb7afe5721ebf227e7c7bf13887939195e94ddbdc4d7c3845034453dcfa452986f1e170b32df81bdc3e3ca683c5374a7935479757c7db9f13ab3824b7d747b9ab44753cc10dce0e9691b623476601808ff924c42382514a558bb0ddfb042e704fbf3422e4f19a10976ee5d0d6a86edd40327051ae4c5494d6150a1e7014f7921ae996dd10f075f1bfe87e0a484d5df7f2d17bf72905c4bb4e2cbc86138f8e4738c91f812105991018044d902ef70aff5439b42422f3cb17fed2127f40a4e1390a892d37a266bade74f402b465c56dd5a1837292ca290f91b939ed5a2617677788f33a79ca7fd4b029ed80d244911d4a09efb6c76810aef904b7b2a8b42ba0a1692884bbeb170577bc93c074ff533d61aa49c824ff22e7170ff6d55b06c4fe1bd662688cb5dcb07b4e99bd406bf82b46d99db73ba0f11400885b9dfd63f5f891de0e50311dc8f5ba9515f6cd9ec55584264de2b7cdf39cc587faa4e5873b813c195fb4c583e25957dcfcadb553c69dbbf5ae644aaf30c2a594094e9a1b307cf138495dac50cd6755e51671f44201f2358524b043dc73720cd40e433b9a40c6331fabb88418748781786e21e737aac700ec5c47357d5eaa6cc3d4e581e8f1b40d0746181b1b02f8e62cb8da7f3258ef4f2634bbf61ed0bde3d5cf1b6e032bd8242c1c8562fa8cb54eb9687e2c7f25675969d0df1db2fbbe3a85d6aadc0cec5b16ae8782c45aa92372ad9c38533137edc18361ce87f180a58f2af05ad42c7b9f71856d3b5f7e5cab7a8803d0adb2f13afb8b8f2e1c96e8c0520014967fc1e4404a80ac6caaabdf1eb66bf9e84ee2bf16124e0278893e5ef8f2ca5c013d14978d643cd53354d58c161a4103ebdc6c1acd1726663b9112001a9f5a205089af99f023add71e1877a84b9ee3d0afd260527af5277fe0b5732b6c7a011d3b29cc8abb9e79fa607150aa81089e8b328ba29bc0562c446fdc066abfdc0d284c1dec661a16c0207f2c27186f41dba8a4bb2956cff53f7797c6686f96cefc37d74b932aea0938bd12cbe793c27dcc54d964bc6022cbf6a813ae68bf48047b75ef92bc0d6da022895201234c0bd63ed267d9d416ecb04f63aba5b826c12244c01b78f71271f74ab09870edab5f19842ca5bf9a9a5140f5f8ff06888eed65b4765df7c41969c3efc608f99c6b98e94080e6da11df2e493a3674d5bb59d8226d1ea29d798261803bd2bd63a144d6b4858bd1009151f938ad04fa3277bda42611165ff2a16b36a3798ff3f16ec37b69381861bb82dadfcac06513bf098ca8d4fa81cd12c758e3590478b1fd7d69e60290f7ebd9ee210fe2eb5cb22e65ed7009024271b9c623d8dab6e10af3bf60c19bd9bbdea0e48a2110c5246a2f5ce9be034a2942bfeba648073e9d23e793cfc57be56e2b1a4c865f702de801ee3f59a0968b0f9074a482944276ae2d2c23ef41c0729d5d273c830e59fd69bfcf60cdb8cfab23534fcb8924f3f6687ec7a7b10cedda1f3f4dd0e661982c05a6b641ab8e35e99f9ba8ea72be7e5c7a5afa923ddda7ce33a3dc1c2c72d394cfe3baf55aa61dc0eb5df3c76df5ddd1bcfbcf7edfcc86bfa571d4d46643e4e10959aee446d301a5df5ffe7c30c37ea5e963798f23382c29e2c411e3db67c132a8ab89d92e157c7dce9723a0c22560762d10be9e274213cde3d0afd2dd6b315b7d02f05c755bb15f9235ff717c854bb71fb188bb7c07b10dee1eded4933bfe41f1731cf3daff1bbb29448d39d5adcfe7e33de2d7a888a2d3f9fca312669c70861e529713004ac6992097f7a40824d96b8e595299c0613e22be9a320f65d9c087e68b3396e2cc4ee9bfbb4a8d579ace797d96e7e7cf0f937750df7da69a7e646b8e2ee6363e7f0825b26ad377bbef9a553fd18d4a03285a89c20457090d3db4267029c2b9db42591cbec55fea89cd945de6d106060031e5f027c43209b31d45407d61b4f540011146da1e68e7465e8d957a7a47594360582d4a9ef998cd70b876a4efeb077c89eda387d9892a4d9f56883ba147f5721856fc64a9a902ce0c5f97eb02cb18774aaf3270804932345ea60418e1a334332
ae13c3092c6516ab4334fb25228455fa219c86ce0bf4a0a37b48d34e3e2e06f4338341de5c767199f6e35fba2466e7f7c8323a10cc3fbf5bfa0e8381ccf00f902dbbe84fa8619b3ea10361dbcbf3f09ca648093776156e8408fa191d94a94577da609c760a0d4af675ff3e4acf2d0a6d35abdba10ca23efa1bc018f75c2b6fcb9647650ccb75ad72c1d82572d10e7ea9ab46a1a37a9da9d50b363539c4db2fa9bb5f67ae502e2f9e2cafe1cb531d4bfcfdc09437790bb2ea714adbf440965012edc308628ee4f49de8a933c31bbf3e87949f8c8f3896b574bcd33d09f8229e42358204664f5b69d2033256bf696a19b8fd8415a577016f8acdd1c92ba81aae4fd0955a44455b075183fb05dc6c5ea0834d0c09b2e0ff714e85a1cb32be14c7e8cccfb8c7dc49bb1cc6a5c38bebd11f6ebc006cb40868a8c79169c45ba416db0934a66f351dfa166271a1dea2878455caa616df4836b029efea230e7a5783b47bbd7529f714cf591e6818115aecd24947339d04e559975827f4f4330c6849b5948a654b95d2230c05f28db542688c200c352850e4870c07fb197abdca9c7d14928bc73044c470eb35ed799ef87332f7f498574b2c4725a7dc75c4794039d01e353472de8c11d2a399213a2dca57b47e50a538e9b5aa0aa3ee334c965ca7bbe1c43030cfc1ffd96a90c723e5d93585d6b3d4016e6a54f7e83317d19f672fd97d2818000068c6124333ded5a7d47afc1acf644e4730c2c9b0e03aeba2aa420094a60611a5f95c7dd68cc5eff0c9e335dd66b3053d415a8123a0754a5fe80b32f294ce825b075425496ec10019e11d2b8b860c6bbe73d8313ff0d2b33fe8e5dab703de69200bbd4d5b7d24e558210ebd7de608343335fbb562c4674b564f78e3d67cca757b616bdb314f1b44ae576340418dd6b48607d5918f0604761f7242782c5db82d77f946124ab8fe7b5689be9d19cada950eb66a22f4e42ff6873391c8bad25524dd4529921c1ddbb41f487d443d43d11023db2e50dccf593a2189e5c77014a81a35a53b478d65c658bf775b16659d1ae1d43a5f99ae75d70c6dbaa4553f92034bafbe98848614d309acf2846bc67af8ac37796478fcc48ebac9f87acb1ef8827cb0c625c75799996eaebe597098294582a0ee8011e2935002f9e8ec818812e8c76b8cacc45326e2dc09b95214fbf028d427c208b3b017408a3bb1fbbb7b6c1b66a003ceef82c3fbd89fac47866e040fd82c10fb2e9be12ee0180635c6a64aa5c0fc1f8fb6c2396a2ad732108842928d373983ca04a2c2a8df90426e4e59762445f99401ec0400b7e328a6f69832d2648765aeb9700234dc0ce5be1d7308c730048c1375b3316cfc31516405eb0fee7f6a1a26ec71ba01020eb0878232e8b7b91ca605fe3a56cf4d0e7c8a1615c3b53cf8feccb1333296e074134bf47fa8e1544b3698ab4adf9bef65f4d0edac59c7f95e67e6e43060fbe95c49e6c292910afdc728595c18121dfeac7521bce91bc4216c225ad0d9b58ad63fbb466a4b44d85a0554a0b7faa2a9b735dc2f7ec3356c144721c07494f77b0872432982df4d03f0293cfdd45bf78eece86eb6be068adfba629772cd8fcfbdaf9e72e1bcec3c6415b5d1788ec1ab61c11faa0b4c133d594fe3125690a3574773ca12d275131d0124d83e8da9136363d2c2b0ad7df9d455d1e2b9d1c541ecd6c3f7dc1e71bdac014b948312fc04fd271a342df46b9b6910fb734a87d8e87798a85280ca57ec57da63eacb75f21dc0c555b06623d7c3b0cf7c3f248b978d406b456c80cef232637bd2a4f244defbfc3102370dd1e90e3a838652f190bba03eff9266a98699988ebefcd6939178f04e3cbff894c73b0fe6721b219256aa28d77b5810366ae93cfdb3de67116da25e4f7be395a7165ccfd85fba3e4fb43a666079a614f08084ae6548a7c405ba54fab9ca74a564c795595a35a037929176d1107639b6e0d971dd0f0d4c6b38cd3a1529692c5e4cdc62451581f6c6a4519a61650b64baf8968cc4261610bb6fcf1af3da4a237d8374f7610335e54caa4731592ab3ee1e74bc68e955ffbc71e688b15815e8af539423a4dfb1abe977a58e4197e61c07323a63bb01d61ed1f792da3eab11e1c38f94a1429a7ee5019d6e857a94e079a4e602d4722149e50575c32677ab8e084e7e0377573514d8033853abb4e130d84df7e51b54e1d57a9812766743fd0d3b30e3dabf71a986fe1906fd80133301504b7070270a13c79f8ec1279090f0e82489d4aa4c80ace23abcdd23c2a0e4e52ff006e1ab9605b13c9ae0c6faff1a025fefaaf5de2cde765e971fbae9fac5f69326ce9c376a365447fcd880eea988ef1f22bed89dc29d9120797ec609a6efafff98d794b717a71d7f874b675c5c3d1b1c4140ea7759c2204a6d428bd454026ed2bc76a716443475035f124a8e9328e817ac5cc532b2e3c9005253e4a4188105c5872235410fd143d761bae0244ffcf9b72bb14d8676f86b25f1f
44e338d95e285abe66209b46fab93cd402b1798ee08c33c559370ce46772bc7cf479878647241f01a8c75350336507c965b5fa0b2085b0c6bec50b6be02d0fdd29a4f4fcb95b58dc1ae229bc973fae8e47cffadd4085710a1a457eabae343ceb34ccf1dd15c3d810a93ac7ee428b4efaf2202ce9d25f7670de8cab9c099379f8c71829b2e0994c689319f76ab9d63353ab133ea38f2a69bbb85c74740a87f699f2f3b1780f4372f5929c1cab0517f978adc6867e65b83854ffdf186fff6d7add6ffa1c2ccb6c13131c17f15cf1ad80e62fcd0df1ddcd388213e3c6f9e6a253af1b3b15fb3af21ce8d36470fb1f1157b8b212ec931ea3213d3fe978615533a9e061c7a95a5086e722447da4aad0e8bd161b3692757d016032f5b597ac79580e067976c70c391f6e1b1020c6a244633adae47f8e08e4d2195843e2154e6bf1fc81214ace796ce1f8d46cfa7628ccce4c880bcd8ea51ad69451262f0cd1abe85985779f7948e4dad4d1376267934afe8b991e9c46a9bd9aaab8481fad193cc3bfd335ae6f3ff6563ef5c293dac8f4b215a15e3c58fa8594a789138a3390da9a67a32cac466eb994e171bd28feb0ed03730912fb02c0fb57e0bd995aa7ebcfb1e5cbb5d7d4d5d165b9a66eb8407483c81b864c1febbc52c966c270ffe5d9c1db1e7b95ce7a6c5a0f3921468a862c3e666fdcbe06d3a4ec2d1d0a72af8e4a0e26a843194c80e69218309b88b164ea384ea85f56278b7a0c149af6c9b91ac6ae76bce5d46177b962a9c8", 0x1000}, {&(0x7f0000001380)="f8dffdaf4a32af63a8568eb3e43b6e3c4f03334988792525079f643c453dac54e0298ce10dab8366499d15df9c0dacd469fcda2ced916c43a6edce7595fa72c275068be7bd5b37f12d5ad517a02e094347cf05077a8012a5228e", 0x5a}, {&(0x7f0000001400)="53809b6fe07f438c248930b98fc4a61cfdbacdfebaf603903e4553381155de70c26c3d47cacf8929ae72268986f42b9efcb63cd4ace429eea8d503d91883d36889f76149ea761dacd7ece1c0a63447d597331a554908fcc0e39f76d2cb569e32f9db686a9eb0f52ec98ed3720a96c3cbf27dbafe91b435ff62cf6f7ee3e75f22afa78f5b9461079e55099fcae69fcb6c307aa1206357b32c185a7353", 0x9c}, {&(0x7f0000000280)="27b60dbb150b732f5decb3e15254869b2cdcae6a54df81505f656ec45ee348ab", 0x20}], 0x6, 0x0) r7 = openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.empty_time\x00', 0x0, 0x0) mmap(&(0x7f0000fff000/0x1000)=nil, 0x1000, 0xa, 0x50, r4, 0xf79ef000) sendfile(r3, r7, &(0x7f0000000180)=0x8001, 0x7) 01:55:23 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x7a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1973.443288][T28093] bond915: (slave bridge882): Enslaving as an active interface with an up link [ 1973.565230][T28092] bond527: (slave bridge469): making interface the new active one [ 1973.589979][T28092] bridge469: entered promiscuous mode [ 1973.605013][T28092] bond527: (slave bridge469): Enslaving as an active interface with an up link 01:55:23 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r5, 0x0) (async) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) r7 = socket$nl_generic(0x10, 0x3, 0x10) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r7, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r8], 0x24}}, 0x0) sendmsg$nl_route(r6, &(0x7f0000000180)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000140)={&(0x7f00000006c0)=@newlink={0x178, 0x10, 0x2, 0x70bd2d, 0x25dfdbfd, {0x0, 0x0, 0x0, r8, 0x20, 0x20008}, [@IFLA_AF_SPEC={0x134, 0x1a, 0x0, 0x1, [@AF_INET6={0x5c, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0xff}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x3f}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x20}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x4}, @IFLA_INET6_TOKEN={0x14, 0x7, @private0}, @IFLA_INET6_TOKEN={0x14, 0x7, @mcast2}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x20}]}, @AF_INET6={0x28, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x8}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x8}, @IFLA_INET6_TOKEN={0x14, 0x7, @initdev={0xfe, 0x88, '\x00', 0x0, 0x0}}]}, @AF_INET6={0x38, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x6}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0xf}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x5}, @IFLA_INET6_TOKEN={0x14, 0x7, @loopback}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x5}]}, @AF_INET6={0x40, 0xa, 0x0, 0x1, [@IFLA_INET6_TOKEN={0x14, 0x7, @private0}, @IFLA_INET6_TOKEN={0x14, 0x7, @private1}, @IFLA_INET6_TOKEN={0x14, 0x7, @loopback}]}, @AF_MPLS={0x4}, @AF_INET={0x20, 0x2, 0x0, 0x1, {0x1c, 0x1, 0x0, 0x1, [{0x8, 0x1e, 0x0, 0x0, 0x8}, {0x8, 0xe, 0x0, 0x0, 0x1000}, {0x8, 0x4, 0x0, 0x0, 0x2}]}}, @AF_INET={0x10, 0x2, 0x0, 0x1, {0xc, 0x1, 0x0, 0x1, [{0x8, 0x4, 0x0, 0x0, 0x4192e4c4}]}}]}, @IFLA_LINKINFO={0x20, 0x12, 0x0, 0x1, @bond_slave={{0xf}, {0xc, 0x5, 0x0, 0x1, @IFLA_BOND_SLAVE_QUEUE_ID={0x6, 0x5, 0x7d}}}}, @IFLA_IFALIASn={0x4}]}, 0x178}, 0x1, 0x0, 0x0, 0x4000000}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1973.628619][T28096] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
01:55:23 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r5, 0x0) (async) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async, rerun: 64) r7 = socket$nl_generic(0x10, 0x3, 0x10) (rerun: 64) ioctl$ifreq_SIOCGIFINDEX_batadv_mesh(r7, 0x8933, &(0x7f0000000080)={'batadv0\x00', 0x0}) sendmsg$BATADV_CMD_GET_HARDIF(0xffffffffffffffff, &(0x7f0000000200)={0x0, 0x0, &(0x7f00000001c0)={&(0x7f0000000000)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16, @ANYBLOB="010090c90000000000000500000008000300", @ANYRES8=r8], 0x24}}, 0x0) sendmsg$nl_route(r6, &(0x7f0000000180)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000140)={&(0x7f00000006c0)=@newlink={0x178, 0x10, 0x2, 0x70bd2d, 0x25dfdbfd, {0x0, 0x0, 0x0, r8, 0x20, 0x20008}, [@IFLA_AF_SPEC={0x134, 0x1a, 0x0, 0x1, [@AF_INET6={0x5c, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0xff}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x3f}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x20}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x4}, @IFLA_INET6_TOKEN={0x14, 0x7, @private0}, @IFLA_INET6_TOKEN={0x14, 0x7, @mcast2}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x20}]}, @AF_INET6={0x28, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x8}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x8}, @IFLA_INET6_TOKEN={0x14, 0x7, @initdev={0xfe, 0x88, '\x00', 0x0, 0x0}}]}, @AF_INET6={0x38, 0xa, 0x0, 0x1, [@IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x6}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0xf}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x5}, @IFLA_INET6_TOKEN={0x14, 0x7, @loopback}, @IFLA_INET6_ADDR_GEN_MODE={0x5, 0x8, 0x5}]}, @AF_INET6={0x40, 0xa, 0x0, 0x1, [@IFLA_INET6_TOKEN={0x14, 0x7, @private0}, @IFLA_INET6_TOKEN={0x14, 0x7, @private1}, @IFLA_INET6_TOKEN={0x14, 0x7, @loopback}]}, @AF_MPLS={0x4}, @AF_INET={0x20, 0x2, 0x0, 0x1, {0x1c, 0x1, 0x0, 0x1, [{0x8, 0x1e, 0x0, 0x0, 0x8}, {0x8, 0xe, 0x0, 0x0, 0x1000}, {0x8, 0x4, 0x0, 0x0, 0x2}]}}, @AF_INET={0x10, 0x2, 0x0, 0x1, {0xc, 0x1, 0x0, 0x1, [{0x8, 0x4, 0x0, 0x0, 0x4192e4c4}]}}]}, @IFLA_LINKINFO={0x20, 0x12, 0x0, 0x1, @bond_slave={{0xf}, {0xc, 0x5, 0x0, 0x1, @IFLA_BOND_SLAVE_QUEUE_ID={0x6, 0x5, 0x7d}}}}, @IFLA_IFALIASn={0x4}]}, 0x178}, 0x1, 0x0, 0x0, 0x4000000}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) 01:55:23 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, 
&(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) connect$inet6(r0, &(0x7f0000000340)={0xa, 0x4e20, 0x3, @private2, 0x3}, 0x1c) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r4, @ANYBLOB="05c5c430a1eee255e8fcbd4c745e32420bdde5396789bff4d0d4be60a301d24178d90bca760c3617016b8f90d5cce5ec4531c86fe1b01344c144caaee14bf717138b18e12dcfdd4df1394765ebe61f0390ae08dbb5c88f247bca82b9949fbfdf573e78b1148909a6d5f545bb00dfbeb08410"], 0x4c}}, 0x0) sendmsg$GTP_CMD_DELPDP(r1, &(0x7f0000000300)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000200)={&(0x7f0000000280)=ANY=[@ANYBLOB="25c0fbff", @ANYRES16=0x0, @ANYBLOB="000325bd7000fbdbdf25010000000800080003000000080009000200000008000400ac1414bb060006000000000008000200010000000c0003000300000000000000"], 0x48}, 0x1, 0x0, 0x0, 0x4000}, 0x800) [ 1973.656145][T28096] workqueue: Failed to create a rescuer kthread for wq "bond1021": -EINTR [ 1973.794739][T28101] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:23 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1180, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1973.832476][T28101] workqueue: Failed to create a rescuer kthread for wq "bond969": -EINTR [ 1973.965881][T28104] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
01:55:23 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x9a00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:23 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(0xffffffffffffffff, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1973.986857][T28104] workqueue: Failed to create a rescuer kthread for wq "bond359": -EINTR [ 1974.148377][T28126] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1974.212988][T28126] bond916: entered promiscuous mode [ 1974.218629][T28126] 8021q: adding VLAN 0 to HW filter on device bond916 01:55:24 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x86, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1974.324042][T28129] bond916: (slave bridge883): making interface the new active one [ 1974.335389][T28129] bridge883: entered promiscuous mode [ 1974.345123][T28129] bond916: (slave bridge883): Enslaving as an active interface with an up link 01:55:24 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000002c0)={'#! 
', './file0', [], 0xa, "59515a47659caf973b9e14ae917ca92f630b683e3dbbdfb786e444d8ca1807617e75d4ea84e7740034cb58e33e85b0a90684602aee3723b82d150eefba64040d51239eafe86290d41e22bbbfce30d0e9fcb17fb9907a4343d3dc2df0ef85018c26582f9d7aaeb40b21abbd7a6dfdb2acb9f5cf5eed645c110f176bcf0a33b834a210ecf1a4b2ec8e2c8bdc80f9aae7e45bed2328cd068131fa60d0ab14fad4d4edaa71891623fb0c3c792ffed3ce"}, 0xb9) (async) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) r2 = socket$igmp6(0xa, 0x3, 0x2) bind$inet6(r2, &(0x7f0000001540)={0xa, 0x4e24, 0x2, @ipv4={'\x00', '\xff\xff', @broadcast}, 0x200}, 0x1c) (async) sendfile(r1, r0, 0x0, 0x5) (async) r3 = accept4$phonet_pipe(r0, &(0x7f0000000040), &(0x7f0000000100)=0x10, 0x80800) (async) r4 = socket$phonet(0x23, 0x2, 0x1) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r5, 0x0) (async) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) setsockopt$MRT6_FLUSH(r6, 0x29, 0xd4, &(0x7f0000001580)=0x1, 0x4) (async) vmsplice(r4, &(0x7f00000014c0)=[{&(0x7f00000000c0)="842f685fcee55cb4d1fbf66567a84479cf7ba9b3dd278ec368", 0x19}, {&(0x7f00000001c0)="21e10f30e01a247b27165bfac1ed93d8cf516948e8d72d39352360dfda9e78e3719bd0568b404fa58fe539d6eb0d5ee7bf0358b748674b3ffc638448e76a4f809eb1b459554f4fcfd892b63330a67c664c3f108d12f7cabffdccca7d454bfc0c47d09dd1c2d07df4991d3ebad7164637277abb45480dc79781def8368b97b0ad130cf668e23a107519b0c958ec7859e0158c778844cc7e3fd9ff63fe1553bafc181c3101af17c559303de7076c7f5f", 0xaf}, 
{&(0x7f0000000380)="325a93951aea85702edff6024a95790e7d3d6ca29e4291328ff67964122e63f31b3a99d0bba1bfbed70591a7220475ca988d855ea4158c4bd10a8371e0b54a59d4835a082b3bff1f1a0150405ce92391de3df30ac6acfe628197e24f5abc90f779d528b8e708214b81bdbc20f52bb0963771a4247e15fb3ca4d2b3bbe13357c3bc0207f1df67f7df041a14b9086cb3184a6dec2a32c9e62e5728f75b276c4ec612b7577b01ef68c18c3e28ae443e24ff50f1f247ece239303e65d5adbc5c3aa8c9f8647e920e924012c4b39780a08982c121e1f7a50336958d8ab234c613b87c4b3423c2f296595d899371f3dad756ea76fc70b9ccb30a32e3a284c6fcc18078471f39d3cff36f3e2ca16b718cb0e45c211abc236a380c4130fc0fc6eb4746f9c70eb7afe5721ebf227e7c7bf13887939195e94ddbdc4d7c3845034453dcfa452986f1e170b32df81bdc3e3ca683c5374a7935479757c7db9f13ab3824b7d747b9ab44753cc10dce0e9691b623476601808ff924c42382514a558bb0ddfb042e704fbf3422e4f19a10976ee5d0d6a86edd40327051ae4c5494d6150a1e7014f7921ae996dd10f075f1bfe87e0a484d5df7f2d17bf72905c4bb4e2cbc86138f8e4738c91f812105991018044d902ef70aff5439b42422f3cb17fed2127f40a4e1390a892d37a266bade74f402b465c56dd5a1837292ca290f91b939ed5a2617677788f33a79ca7fd4b029ed80d244911d4a09efb6c76810aef904b7b2a8b42ba0a1692884bbeb170577bc93c074ff533d61aa49c824ff22e7170ff6d55b06c4fe1bd662688cb5dcb07b4e99bd406bf82b46d99db73ba0f11400885b9dfd63f5f891de0e50311dc8f5ba9515f6cd9ec55584264de2b7cdf39cc587faa4e5873b813c195fb4c583e25957dcfcadb553c69dbbf5ae644aaf30c2a594094e9a1b307cf138495dac50cd6755e51671f44201f2358524b043dc73720cd40e433b9a40c6331fabb88418748781786e21e737aac700ec5c47357d5eaa6cc3d4e581e8f1b40d0746181b1b02f8e62cb8da7f3258ef4f2634bbf61ed0bde3d5cf1b6e032bd8242c1c8562fa8cb54eb9687e2c7f25675969d0df1db2fbbe3a85d6aadc0cec5b16ae8782c45aa92372ad9c38533137edc18361ce87f180a58f2af05ad42c7b9f71856d3b5f7e5cab7a8803d0adb2f13afb8b8f2e1c96e8c0520014967fc1e4404a80ac6caaabdf1eb66bf9e84ee2bf16124e0278893e5ef8f2ca5c013d14978d643cd53354d58c161a4103ebdc6c1acd1726663b9112001a9f5a205089af99f023add71e1877a84b9ee3d0afd260527af5277fe0b5732b6c7a011d3b29cc8abb9e79fa607150aa81089e8b328ba29bc0562c446fdc066abfdc0d284c1dec661a16c0207f2c27186f41dba8a4bb2956cff53f7797c6686f96cefc37d74b932aea0938bd12cbe793c27dcc54d964bc6022cbf6a813ae68bf48047b75ef92bc0d6da022895201234c0bd63ed267d9d416ecb04f63aba5b826c12244c01b78f71271f74ab09870edab5f19842ca5bf9a9a5140f5f8ff06888eed65b4765df7c41969c3efc608f99c6b98e94080e6da11df2e493a3674d5bb59d8226d1ea29d798261803bd2bd63a144d6b4858bd1009151f938ad04fa3277bda42611165ff2a16b36a3798ff3f16ec37b69381861bb82dadfcac06513bf098ca8d4fa81cd12c758e3590478b1fd7d69e60290f7ebd9ee210fe2eb5cb22e65ed7009024271b9c623d8dab6e10af3bf60c19bd9bbdea0e48a2110c5246a2f5ce9be034a2942bfeba648073e9d23e793cfc57be56e2b1a4c865f702de801ee3f59a0968b0f9074a482944276ae2d2c23ef41c0729d5d273c830e59fd69bfcf60cdb8cfab23534fcb8924f3f6687ec7a7b10cedda1f3f4dd0e661982c05a6b641ab8e35e99f9ba8ea72be7e5c7a5afa923ddda7ce33a3dc1c2c72d394cfe3baf55aa61dc0eb5df3c76df5ddd1bcfbcf7edfcc86bfa571d4d46643e4e10959aee446d301a5df5ffe7c30c37ea5e963798f23382c29e2c411e3db67c132a8ab89d92e157c7dce9723a0c22560762d10be9e274213cde3d0afd2dd6b315b7d02f05c755bb15f9235ff717c854bb71fb188bb7c07b10dee1eded4933bfe41f1731cf3daff1bbb29448d39d5adcfe7e33de2d7a888a2d3f9fca312669c70861e529713004ac6992097f7a40824d96b8e595299c0613e22be9a320f65d9c087e68b3396e2cc4ee9bfbb4a8d579ace797d96e7e7cf0f937750df7da69a7e646b8e2ee6363e7f0825b26ad377bbef9a553fd18d4a03285a89c20457090d3db4267029c2b9db42591cbec55fea89cd945de6d106060031e5f027c43209b31d45407d61b4f540011146da1e68e7465e8d957a7a47594360582d4a9ef998cd70b876a4efeb077c89eda387d9892a4d9f56883ba147f5721856fc64a9a902ce0c5f97eb02cb18774aaf3270804932345ea60418e1a334332
ae13c3092c6516ab4334fb25228455fa219c86ce0bf4a0a37b48d34e3e2e06f4338341de5c767199f6e35fba2466e7f7c8323a10cc3fbf5bfa0e8381ccf00f902dbbe84fa8619b3ea10361dbcbf3f09ca648093776156e8408fa191d94a94577da609c760a0d4af675ff3e4acf2d0a6d35abdba10ca23efa1bc018f75c2b6fcb9647650ccb75ad72c1d82572d10e7ea9ab46a1a37a9da9d50b363539c4db2fa9bb5f67ae502e2f9e2cafe1cb531d4bfcfdc09437790bb2ea714adbf440965012edc308628ee4f49de8a933c31bbf3e87949f8c8f3896b574bcd33d09f8229e42358204664f5b69d2033256bf696a19b8fd8415a577016f8acdd1c92ba81aae4fd0955a44455b075183fb05dc6c5ea0834d0c09b2e0ff714e85a1cb32be14c7e8cccfb8c7dc49bb1cc6a5c38bebd11f6ebc006cb40868a8c79169c45ba416db0934a66f351dfa166271a1dea2878455caa616df4836b029efea230e7a5783b47bbd7529f714cf591e6818115aecd24947339d04e559975827f4f4330c6849b5948a654b95d2230c05f28db542688c200c352850e4870c07fb197abdca9c7d14928bc73044c470eb35ed799ef87332f7f498574b2c4725a7dc75c4794039d01e353472de8c11d2a399213a2dca57b47e50a538e9b5aa0aa3ee334c965ca7bbe1c43030cfc1ffd96a90c723e5d93585d6b3d4016e6a54f7e83317d19f672fd97d2818000068c6124333ded5a7d47afc1acf644e4730c2c9b0e03aeba2aa420094a60611a5f95c7dd68cc5eff0c9e335dd66b3053d415a8123a0754a5fe80b32f294ce825b075425496ec10019e11d2b8b860c6bbe73d8313ff0d2b33fe8e5dab703de69200bbd4d5b7d24e558210ebd7de608343335fbb562c4674b564f78e3d67cca757b616bdb314f1b44ae576340418dd6b48607d5918f0604761f7242782c5db82d77f946124ab8fe7b5689be9d19cada950eb66a22f4e42ff6873391c8bad25524dd4529921c1ddbb41f487d443d43d11023db2e50dccf593a2189e5c77014a81a35a53b478d65c658bf775b16659d1ae1d43a5f99ae75d70c6dbaa4553f92034bafbe98848614d309acf2846bc67af8ac37796478fcc48ebac9f87acb1ef8827cb0c625c75799996eaebe597098294582a0ee8011e2935002f9e8ec818812e8c76b8cacc45326e2dc09b95214fbf028d427c208b3b017408a3bb1fbbb7b6c1b66a003ceef82c3fbd89fac47866e040fd82c10fb2e9be12ee0180635c6a64aa5c0fc1f8fb6c2396a2ad732108842928d373983ca04a2c2a8df90426e4e59762445f99401ec0400b7e328a6f69832d2648765aeb9700234dc0ce5be1d7308c730048c1375b3316cfc31516405eb0fee7f6a1a26ec71ba01020eb0878232e8b7b91ca605fe3a56cf4d0e7c8a1615c3b53cf8feccb1333296e074134bf47fa8e1544b3698ab4adf9bef65f4d0edac59c7f95e67e6e43060fbe95c49e6c292910afdc728595c18121dfeac7521bce91bc4216c225ad0d9b58ad63fbb466a4b44d85a0554a0b7faa2a9b735dc2f7ec3356c144721c07494f77b0872432982df4d03f0293cfdd45bf78eece86eb6be068adfba629772cd8fcfbdaf9e72e1bcec3c6415b5d1788ec1ab61c11faa0b4c133d594fe3125690a3574773ca12d275131d0124d83e8da9136363d2c2b0ad7df9d455d1e2b9d1c541ecd6c3f7dc1e71bdac014b948312fc04fd271a342df46b9b6910fb734a87d8e87798a85280ca57ec57da63eacb75f21dc0c555b06623d7c3b0cf7c3f248b978d406b456c80cef232637bd2a4f244defbfc3102370dd1e90e3a838652f190bba03eff9266a98699988ebefcd6939178f04e3cbff894c73b0fe6721b219256aa28d77b5810366ae93cfdb3de67116da25e4f7be395a7165ccfd85fba3e4fb43a666079a614f08084ae6548a7c405ba54fab9ca74a564c795595a35a037929176d1107639b6e0d971dd0f0d4c6b38cd3a1529692c5e4cdc62451581f6c6a4519a61650b64baf8968cc4261610bb6fcf1af3da4a237d8374f7610335e54caa4731592ab3ee1e74bc68e955ffbc71e688b15815e8af539423a4dfb1abe977a58e4197e61c07323a63bb01d61ed1f792da3eab11e1c38f94a1429a7ee5019d6e857a94e079a4e602d4722149e50575c32677ab8e084e7e0377573514d8033853abb4e130d84df7e51b54e1d57a9812766743fd0d3b30e3dabf71a986fe1906fd80133301504b7070270a13c79f8ec1279090f0e82489d4aa4c80ace23abcdd23c2a0e4e52ff006e1ab9605b13c9ae0c6faff1a025fefaaf5de2cde765e971fbae9fac5f69326ce9c376a365447fcd880eea988ef1f22bed89dc29d9120797ec609a6efafff98d794b717a71d7f874b675c5c3d1b1c4140ea7759c2204a6d428bd454026ed2bc76a716443475035f124a8e9328e817ac5cc532b2e3c9005253e4a4188105c5872235410fd143d761bae0244ffcf9b72bb14d8676f86b25f1f
44e338d95e285abe66209b46fab93cd402b1798ee08c33c559370ce46772bc7cf479878647241f01a8c75350336507c965b5fa0b2085b0c6bec50b6be02d0fdd29a4f4fcb95b58dc1ae229bc973fae8e47cffadd4085710a1a457eabae343ceb34ccf1dd15c3d810a93ac7ee428b4efaf2202ce9d25f7670de8cab9c099379f8c71829b2e0994c689319f76ab9d63353ab133ea38f2a69bbb85c74740a87f699f2f3b1780f4372f5929c1cab0517f978adc6867e65b83854ffdf186fff6d7add6ffa1c2ccb6c13131c17f15cf1ad80e62fcd0df1ddcd388213e3c6f9e6a253af1b3b15fb3af21ce8d36470fb1f1157b8b212ec931ea3213d3fe978615533a9e061c7a95a5086e722447da4aad0e8bd161b3692757d016032f5b597ac79580e067976c70c391f6e1b1020c6a244633adae47f8e08e4d2195843e2154e6bf1fc81214ace796ce1f8d46cfa7628ccce4c880bcd8ea51ad69451262f0cd1abe85985779f7948e4dad4d1376267934afe8b991e9c46a9bd9aaab8481fad193cc3bfd335ae6f3ff6563ef5c293dac8f4b215a15e3c58fa8594a789138a3390da9a67a32cac466eb994e171bd28feb0ed03730912fb02c0fb57e0bd995aa7ebcfb1e5cbb5d7d4d5d165b9a66eb8407483c81b864c1febbc52c966c270ffe5d9c1db1e7b95ce7a6c5a0f3921468a862c3e666fdcbe06d3a4ec2d1d0a72af8e4a0e26a843194c80e69218309b88b164ea384ea85f56278b7a0c149af6c9b91ac6ae76bce5d46177b962a9c8", 0x1000}, {&(0x7f0000001380)="f8dffdaf4a32af63a8568eb3e43b6e3c4f03334988792525079f643c453dac54e0298ce10dab8366499d15df9c0dacd469fcda2ced916c43a6edce7595fa72c275068be7bd5b37f12d5ad517a02e094347cf05077a8012a5228e", 0x5a}, {&(0x7f0000001400)="53809b6fe07f438c248930b98fc4a61cfdbacdfebaf603903e4553381155de70c26c3d47cacf8929ae72268986f42b9efcb63cd4ace429eea8d503d91883d36889f76149ea761dacd7ece1c0a63447d597331a554908fcc0e39f76d2cb569e32f9db686a9eb0f52ec98ed3720a96c3cbf27dbafe91b435ff62cf6f7ee3e75f22afa78f5b9461079e55099fcae69fcb6c307aa1206357b32c185a7353", 0x9c}, {&(0x7f0000000280)="27b60dbb150b732f5decb3e15254869b2cdcae6a54df81505f656ec45ee348ab", 0x20}], 0x6, 0x0) (async) r7 = openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.empty_time\x00', 0x0, 0x0) mmap(&(0x7f0000fff000/0x1000)=nil, 0x1000, 0xa, 0x50, r4, 0xf79ef000) (async) sendfile(r3, r7, &(0x7f0000000180)=0x8001, 0x7) [ 1974.427087][T28149] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 1974.492170][T28149] bond528: entered promiscuous mode [ 1974.499312][T28149] 8021q: adding VLAN 0 to HW filter on device bond528 [ 1974.516873][T28151] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 1974.571480][T28151] bond1021: entered promiscuous mode [ 1974.577722][T28151] 8021q: adding VLAN 0 to HW filter on device bond1021 01:55:24 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000002c0)={'#! 
', './file0', [], 0xa, "59515a47659caf973b9e14ae917ca92f630b683e3dbbdfb786e444d8ca1807617e75d4ea84e7740034cb58e33e85b0a90684602aee3723b82d150eefba64040d51239eafe86290d41e22bbbfce30d0e9fcb17fb9907a4343d3dc2df0ef85018c26582f9d7aaeb40b21abbd7a6dfdb2acb9f5cf5eed645c110f176bcf0a33b834a210ecf1a4b2ec8e2c8bdc80f9aae7e45bed2328cd068131fa60d0ab14fad4d4edaa71891623fb0c3c792ffed3ce"}, 0xb9) (async) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) r2 = socket$igmp6(0xa, 0x3, 0x2) bind$inet6(r2, &(0x7f0000001540)={0xa, 0x4e24, 0x2, @ipv4={'\x00', '\xff\xff', @broadcast}, 0x200}, 0x1c) sendfile(r1, r0, 0x0, 0x5) (async) r3 = accept4$phonet_pipe(r0, &(0x7f0000000040), &(0x7f0000000100)=0x10, 0x80800) (async) r4 = socket$phonet(0x23, 0x2, 0x1) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r5, 0x0) (async) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) setsockopt$MRT6_FLUSH(r6, 0x29, 0xd4, &(0x7f0000001580)=0x1, 0x4) vmsplice(r4, &(0x7f00000014c0)=[{&(0x7f00000000c0)="842f685fcee55cb4d1fbf66567a84479cf7ba9b3dd278ec368", 0x19}, {&(0x7f00000001c0)="21e10f30e01a247b27165bfac1ed93d8cf516948e8d72d39352360dfda9e78e3719bd0568b404fa58fe539d6eb0d5ee7bf0358b748674b3ffc638448e76a4f809eb1b459554f4fcfd892b63330a67c664c3f108d12f7cabffdccca7d454bfc0c47d09dd1c2d07df4991d3ebad7164637277abb45480dc79781def8368b97b0ad130cf668e23a107519b0c958ec7859e0158c778844cc7e3fd9ff63fe1553bafc181c3101af17c559303de7076c7f5f", 0xaf}, 
{&(0x7f0000000380)="325a93951aea85702edff6024a95790e7d3d6ca29e4291328ff67964122e63f31b3a99d0bba1bfbed70591a7220475ca988d855ea4158c4bd10a8371e0b54a59d4835a082b3bff1f1a0150405ce92391de3df30ac6acfe628197e24f5abc90f779d528b8e708214b81bdbc20f52bb0963771a4247e15fb3ca4d2b3bbe13357c3bc0207f1df67f7df041a14b9086cb3184a6dec2a32c9e62e5728f75b276c4ec612b7577b01ef68c18c3e28ae443e24ff50f1f247ece239303e65d5adbc5c3aa8c9f8647e920e924012c4b39780a08982c121e1f7a50336958d8ab234c613b87c4b3423c2f296595d899371f3dad756ea76fc70b9ccb30a32e3a284c6fcc18078471f39d3cff36f3e2ca16b718cb0e45c211abc236a380c4130fc0fc6eb4746f9c70eb7afe5721ebf227e7c7bf13887939195e94ddbdc4d7c3845034453dcfa452986f1e170b32df81bdc3e3ca683c5374a7935479757c7db9f13ab3824b7d747b9ab44753cc10dce0e9691b623476601808ff924c42382514a558bb0ddfb042e704fbf3422e4f19a10976ee5d0d6a86edd40327051ae4c5494d6150a1e7014f7921ae996dd10f075f1bfe87e0a484d5df7f2d17bf72905c4bb4e2cbc86138f8e4738c91f812105991018044d902ef70aff5439b42422f3cb17fed2127f40a4e1390a892d37a266bade74f402b465c56dd5a1837292ca290f91b939ed5a2617677788f33a79ca7fd4b029ed80d244911d4a09efb6c76810aef904b7b2a8b42ba0a1692884bbeb170577bc93c074ff533d61aa49c824ff22e7170ff6d55b06c4fe1bd662688cb5dcb07b4e99bd406bf82b46d99db73ba0f11400885b9dfd63f5f891de0e50311dc8f5ba9515f6cd9ec55584264de2b7cdf39cc587faa4e5873b813c195fb4c583e25957dcfcadb553c69dbbf5ae644aaf30c2a594094e9a1b307cf138495dac50cd6755e51671f44201f2358524b043dc73720cd40e433b9a40c6331fabb88418748781786e21e737aac700ec5c47357d5eaa6cc3d4e581e8f1b40d0746181b1b02f8e62cb8da7f3258ef4f2634bbf61ed0bde3d5cf1b6e032bd8242c1c8562fa8cb54eb9687e2c7f25675969d0df1db2fbbe3a85d6aadc0cec5b16ae8782c45aa92372ad9c38533137edc18361ce87f180a58f2af05ad42c7b9f71856d3b5f7e5cab7a8803d0adb2f13afb8b8f2e1c96e8c0520014967fc1e4404a80ac6caaabdf1eb66bf9e84ee2bf16124e0278893e5ef8f2ca5c013d14978d643cd53354d58c161a4103ebdc6c1acd1726663b9112001a9f5a205089af99f023add71e1877a84b9ee3d0afd260527af5277fe0b5732b6c7a011d3b29cc8abb9e79fa607150aa81089e8b328ba29bc0562c446fdc066abfdc0d284c1dec661a16c0207f2c27186f41dba8a4bb2956cff53f7797c6686f96cefc37d74b932aea0938bd12cbe793c27dcc54d964bc6022cbf6a813ae68bf48047b75ef92bc0d6da022895201234c0bd63ed267d9d416ecb04f63aba5b826c12244c01b78f71271f74ab09870edab5f19842ca5bf9a9a5140f5f8ff06888eed65b4765df7c41969c3efc608f99c6b98e94080e6da11df2e493a3674d5bb59d8226d1ea29d798261803bd2bd63a144d6b4858bd1009151f938ad04fa3277bda42611165ff2a16b36a3798ff3f16ec37b69381861bb82dadfcac06513bf098ca8d4fa81cd12c758e3590478b1fd7d69e60290f7ebd9ee210fe2eb5cb22e65ed7009024271b9c623d8dab6e10af3bf60c19bd9bbdea0e48a2110c5246a2f5ce9be034a2942bfeba648073e9d23e793cfc57be56e2b1a4c865f702de801ee3f59a0968b0f9074a482944276ae2d2c23ef41c0729d5d273c830e59fd69bfcf60cdb8cfab23534fcb8924f3f6687ec7a7b10cedda1f3f4dd0e661982c05a6b641ab8e35e99f9ba8ea72be7e5c7a5afa923ddda7ce33a3dc1c2c72d394cfe3baf55aa61dc0eb5df3c76df5ddd1bcfbcf7edfcc86bfa571d4d46643e4e10959aee446d301a5df5ffe7c30c37ea5e963798f23382c29e2c411e3db67c132a8ab89d92e157c7dce9723a0c22560762d10be9e274213cde3d0afd2dd6b315b7d02f05c755bb15f9235ff717c854bb71fb188bb7c07b10dee1eded4933bfe41f1731cf3daff1bbb29448d39d5adcfe7e33de2d7a888a2d3f9fca312669c70861e529713004ac6992097f7a40824d96b8e595299c0613e22be9a320f65d9c087e68b3396e2cc4ee9bfbb4a8d579ace797d96e7e7cf0f937750df7da69a7e646b8e2ee6363e7f0825b26ad377bbef9a553fd18d4a03285a89c20457090d3db4267029c2b9db42591cbec55fea89cd945de6d106060031e5f027c43209b31d45407d61b4f540011146da1e68e7465e8d957a7a47594360582d4a9ef998cd70b876a4efeb077c89eda387d9892a4d9f56883ba147f5721856fc64a9a902ce0c5f97eb02cb18774aaf3270804932345ea60418e1a334332
ae13c3092c6516ab4334fb25228455fa219c86ce0bf4a0a37b48d34e3e2e06f4338341de5c767199f6e35fba2466e7f7c8323a10cc3fbf5bfa0e8381ccf00f902dbbe84fa8619b3ea10361dbcbf3f09ca648093776156e8408fa191d94a94577da609c760a0d4af675ff3e4acf2d0a6d35abdba10ca23efa1bc018f75c2b6fcb9647650ccb75ad72c1d82572d10e7ea9ab46a1a37a9da9d50b363539c4db2fa9bb5f67ae502e2f9e2cafe1cb531d4bfcfdc09437790bb2ea714adbf440965012edc308628ee4f49de8a933c31bbf3e87949f8c8f3896b574bcd33d09f8229e42358204664f5b69d2033256bf696a19b8fd8415a577016f8acdd1c92ba81aae4fd0955a44455b075183fb05dc6c5ea0834d0c09b2e0ff714e85a1cb32be14c7e8cccfb8c7dc49bb1cc6a5c38bebd11f6ebc006cb40868a8c79169c45ba416db0934a66f351dfa166271a1dea2878455caa616df4836b029efea230e7a5783b47bbd7529f714cf591e6818115aecd24947339d04e559975827f4f4330c6849b5948a654b95d2230c05f28db542688c200c352850e4870c07fb197abdca9c7d14928bc73044c470eb35ed799ef87332f7f498574b2c4725a7dc75c4794039d01e353472de8c11d2a399213a2dca57b47e50a538e9b5aa0aa3ee334c965ca7bbe1c43030cfc1ffd96a90c723e5d93585d6b3d4016e6a54f7e83317d19f672fd97d2818000068c6124333ded5a7d47afc1acf644e4730c2c9b0e03aeba2aa420094a60611a5f95c7dd68cc5eff0c9e335dd66b3053d415a8123a0754a5fe80b32f294ce825b075425496ec10019e11d2b8b860c6bbe73d8313ff0d2b33fe8e5dab703de69200bbd4d5b7d24e558210ebd7de608343335fbb562c4674b564f78e3d67cca757b616bdb314f1b44ae576340418dd6b48607d5918f0604761f7242782c5db82d77f946124ab8fe7b5689be9d19cada950eb66a22f4e42ff6873391c8bad25524dd4529921c1ddbb41f487d443d43d11023db2e50dccf593a2189e5c77014a81a35a53b478d65c658bf775b16659d1ae1d43a5f99ae75d70c6dbaa4553f92034bafbe98848614d309acf2846bc67af8ac37796478fcc48ebac9f87acb1ef8827cb0c625c75799996eaebe597098294582a0ee8011e2935002f9e8ec818812e8c76b8cacc45326e2dc09b95214fbf028d427c208b3b017408a3bb1fbbb7b6c1b66a003ceef82c3fbd89fac47866e040fd82c10fb2e9be12ee0180635c6a64aa5c0fc1f8fb6c2396a2ad732108842928d373983ca04a2c2a8df90426e4e59762445f99401ec0400b7e328a6f69832d2648765aeb9700234dc0ce5be1d7308c730048c1375b3316cfc31516405eb0fee7f6a1a26ec71ba01020eb0878232e8b7b91ca605fe3a56cf4d0e7c8a1615c3b53cf8feccb1333296e074134bf47fa8e1544b3698ab4adf9bef65f4d0edac59c7f95e67e6e43060fbe95c49e6c292910afdc728595c18121dfeac7521bce91bc4216c225ad0d9b58ad63fbb466a4b44d85a0554a0b7faa2a9b735dc2f7ec3356c144721c07494f77b0872432982df4d03f0293cfdd45bf78eece86eb6be068adfba629772cd8fcfbdaf9e72e1bcec3c6415b5d1788ec1ab61c11faa0b4c133d594fe3125690a3574773ca12d275131d0124d83e8da9136363d2c2b0ad7df9d455d1e2b9d1c541ecd6c3f7dc1e71bdac014b948312fc04fd271a342df46b9b6910fb734a87d8e87798a85280ca57ec57da63eacb75f21dc0c555b06623d7c3b0cf7c3f248b978d406b456c80cef232637bd2a4f244defbfc3102370dd1e90e3a838652f190bba03eff9266a98699988ebefcd6939178f04e3cbff894c73b0fe6721b219256aa28d77b5810366ae93cfdb3de67116da25e4f7be395a7165ccfd85fba3e4fb43a666079a614f08084ae6548a7c405ba54fab9ca74a564c795595a35a037929176d1107639b6e0d971dd0f0d4c6b38cd3a1529692c5e4cdc62451581f6c6a4519a61650b64baf8968cc4261610bb6fcf1af3da4a237d8374f7610335e54caa4731592ab3ee1e74bc68e955ffbc71e688b15815e8af539423a4dfb1abe977a58e4197e61c07323a63bb01d61ed1f792da3eab11e1c38f94a1429a7ee5019d6e857a94e079a4e602d4722149e50575c32677ab8e084e7e0377573514d8033853abb4e130d84df7e51b54e1d57a9812766743fd0d3b30e3dabf71a986fe1906fd80133301504b7070270a13c79f8ec1279090f0e82489d4aa4c80ace23abcdd23c2a0e4e52ff006e1ab9605b13c9ae0c6faff1a025fefaaf5de2cde765e971fbae9fac5f69326ce9c376a365447fcd880eea988ef1f22bed89dc29d9120797ec609a6efafff98d794b717a71d7f874b675c5c3d1b1c4140ea7759c2204a6d428bd454026ed2bc76a716443475035f124a8e9328e817ac5cc532b2e3c9005253e4a4188105c5872235410fd143d761bae0244ffcf9b72bb14d8676f86b25f1f
44e338d95e285abe66209b46fab93cd402b1798ee08c33c559370ce46772bc7cf479878647241f01a8c75350336507c965b5fa0b2085b0c6bec50b6be02d0fdd29a4f4fcb95b58dc1ae229bc973fae8e47cffadd4085710a1a457eabae343ceb34ccf1dd15c3d810a93ac7ee428b4efaf2202ce9d25f7670de8cab9c099379f8c71829b2e0994c689319f76ab9d63353ab133ea38f2a69bbb85c74740a87f699f2f3b1780f4372f5929c1cab0517f978adc6867e65b83854ffdf186fff6d7add6ffa1c2ccb6c13131c17f15cf1ad80e62fcd0df1ddcd388213e3c6f9e6a253af1b3b15fb3af21ce8d36470fb1f1157b8b212ec931ea3213d3fe978615533a9e061c7a95a5086e722447da4aad0e8bd161b3692757d016032f5b597ac79580e067976c70c391f6e1b1020c6a244633adae47f8e08e4d2195843e2154e6bf1fc81214ace796ce1f8d46cfa7628ccce4c880bcd8ea51ad69451262f0cd1abe85985779f7948e4dad4d1376267934afe8b991e9c46a9bd9aaab8481fad193cc3bfd335ae6f3ff6563ef5c293dac8f4b215a15e3c58fa8594a789138a3390da9a67a32cac466eb994e171bd28feb0ed03730912fb02c0fb57e0bd995aa7ebcfb1e5cbb5d7d4d5d165b9a66eb8407483c81b864c1febbc52c966c270ffe5d9c1db1e7b95ce7a6c5a0f3921468a862c3e666fdcbe06d3a4ec2d1d0a72af8e4a0e26a843194c80e69218309b88b164ea384ea85f56278b7a0c149af6c9b91ac6ae76bce5d46177b962a9c8", 0x1000}, {&(0x7f0000001380)="f8dffdaf4a32af63a8568eb3e43b6e3c4f03334988792525079f643c453dac54e0298ce10dab8366499d15df9c0dacd469fcda2ced916c43a6edce7595fa72c275068be7bd5b37f12d5ad517a02e094347cf05077a8012a5228e", 0x5a}, {&(0x7f0000001400)="53809b6fe07f438c248930b98fc4a61cfdbacdfebaf603903e4553381155de70c26c3d47cacf8929ae72268986f42b9efcb63cd4ace429eea8d503d91883d36889f76149ea761dacd7ece1c0a63447d597331a554908fcc0e39f76d2cb569e32f9db686a9eb0f52ec98ed3720a96c3cbf27dbafe91b435ff62cf6f7ee3e75f22afa78f5b9461079e55099fcae69fcb6c307aa1206357b32c185a7353", 0x9c}, {&(0x7f0000000280)="27b60dbb150b732f5decb3e15254869b2cdcae6a54df81505f656ec45ee348ab", 0x20}], 0x6, 0x0) r7 = openat$cgroup_ro(r0, &(0x7f0000000140)='blkio.bfq.empty_time\x00', 0x0, 0x0) mmap(&(0x7f0000fff000/0x1000)=nil, 0x1000, 0xa, 0x50, r4, 0xf79ef000) (async) sendfile(r3, r7, &(0x7f0000000180)=0x8001, 0x7) 01:55:24 executing program 5: ioctl$F2FS_IOC_COMMIT_ATOMIC_WRITE(0xffffffffffffffff, 0xf502, 0x0) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB='\x00'/11], 0xb) 01:55:24 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) (async) listen(r0, 0x0) connect$inet6(r0, &(0x7f0000000340)={0xa, 0x4e20, 0x3, @private2, 0x3}, 0x1c) accept4(r0, 0x0, 0x0, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, 
@ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r4, @ANYBLOB="05c5c430a1eee255e8fcbd4c745e32420bdde5396789bff4d0d4be60a301d24178d90bca760c3617016b8f90d5cce5ec4531c86fe1b01344c144caaee14bf717138b18e12dcfdd4df1394765ebe61f0390ae08dbb5c88f247bca82b9949fbfdf573e78b1148909a6d5f545bb00dfbeb08410"], 0x4c}}, 0x0) sendmsg$GTP_CMD_DELPDP(r1, &(0x7f0000000300)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000200)={&(0x7f0000000280)=ANY=[@ANYBLOB="25c0fbff", @ANYRES16=0x0, @ANYBLOB="000325bd7000fbdbdf25010000000800080003000000080009000200000008000400ac1414bb060006000000000008000200010000000c0003000300000000000000"], 0x48}, 0x1, 0x0, 0x0, 0x4000}, 0x800) [ 1974.733806][T28153] bond1021: (slave bridge985): making interface the new active one [ 1974.768406][T28153] bridge985: entered promiscuous mode 01:55:24 executing program 5: ioctl$F2FS_IOC_COMMIT_ATOMIC_WRITE(0xffffffffffffffff, 0xf502, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB='\x00'/11], 0xb) 01:55:24 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1200, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1974.797086][T28153] bond1021: (slave bridge985): Enslaving as an active interface with an up link [ 1974.812306][T28158] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:24 executing program 5: ioctl$F2FS_IOC_COMMIT_ATOMIC_WRITE(0xffffffffffffffff, 0xf502, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB='\x00'/11], 0xb) [ 1974.925695][T28158] bond969: entered promiscuous mode 01:55:24 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)={'#! ', './file0', [{0x20, '\\&-'}, {}, {}, {0x20, '#! '}, {0x20, 'memory.events\x00'}, {0x20, '+\''}, {0x20, '\\\\'}, {0x20, '\xa7'}]}, 0x6) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) accept$alg(r2, 0x0, 0x0) [ 1974.964592][T28158] 8021q: adding VLAN 0 to HW filter on device bond969 [ 1974.988004][T28199] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
[ 1975.070885][T28199] bond1022: entered promiscuous mode [ 1975.081524][T28199] 8021q: adding VLAN 0 to HW filter on device bond1022 [ 1975.148819][T28160] bond969: (slave bridge927): making interface the new active one [ 1975.160364][T28160] bridge927: entered promiscuous mode [ 1975.174590][T28160] bond969: (slave bridge927): Enslaving as an active interface with an up link 01:55:24 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x9c02, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:24 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, 0x0, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1975.193026][T28163] workqueue: Failed to create a rescuer kthread for wq "bond359": -EINTR 01:55:25 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x8e, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1975.285843][T28166] workqueue: Failed to create a rescuer kthread for wq "bond917": -EINTR 01:55:25 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) connect$inet6(r0, &(0x7f0000000340)={0xa, 0x4e20, 0x3, @private2, 0x3}, 0x1c) accept4(r0, 0x0, 0x0, 0x0) r1 = 
socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r4, @ANYBLOB="05c5c430a1eee255e8fcbd4c745e32420bdde5396789bff4d0d4be60a301d24178d90bca760c3617016b8f90d5cce5ec4531c86fe1b01344c144caaee14bf717138b18e12dcfdd4df1394765ebe61f0390ae08dbb5c88f247bca82b9949fbfdf573e78b1148909a6d5f545bb00dfbeb08410"], 0x4c}}, 0x0) sendmsg$GTP_CMD_DELPDP(r1, &(0x7f0000000300)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000200)={&(0x7f0000000280)=ANY=[@ANYBLOB="25c0fbff", @ANYRES16=0x0, @ANYBLOB="000325bd7000fbdbdf25010000000800080003000000080009000200000008000400ac1414bb060006000000000008000200010000000c0003000300000000000000"], 0x48}, 0x1, 0x0, 0x0, 0x4000}, 0x800) socket$inet6_tcp(0xa, 0x1, 0x0) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) connect$inet6(r0, &(0x7f0000000340)={0xa, 0x4e20, 0x3, @private2, 0x3}, 0x1c) (async) accept4(r0, 0x0, 0x0, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r4, @ANYBLOB="05c5c430a1eee255e8fcbd4c745e32420bdde5396789bff4d0d4be60a301d24178d90bca760c3617016b8f90d5cce5ec4531c86fe1b01344c144caaee14bf717138b18e12dcfdd4df1394765ebe61f0390ae08dbb5c88f247bca82b9949fbfdf573e78b1148909a6d5f545bb00dfbeb08410"], 0x4c}}, 0x0) (async) sendmsg$GTP_CMD_DELPDP(r1, &(0x7f0000000300)={&(0x7f00000001c0)={0x10, 0x0, 0x0, 0x20000000}, 0xc, &(0x7f0000000200)={&(0x7f0000000280)=ANY=[@ANYBLOB="25c0fbff", @ANYRES16=0x0, @ANYBLOB="000325bd7000fbdbdf25010000000800080003000000080009000200000008000400ac1414bb060006000000000008000200010000000c0003000300000000000000"], 0x48}, 0x1, 0x0, 0x0, 0x4000}, 0x800) (async) [ 1975.476210][T28189] workqueue: Failed to create a rescuer kthread for wq "bond529": -EINTR [ 1975.748852][T28203] bond1022: (slave bridge986): making interface the new active one [ 1975.769417][T28203] bridge986: entered promiscuous mode [ 1975.781559][T28203] bond1022: (slave bridge986): Enslaving as an active interface with an up link 01:55:25 
executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1256, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:25 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)={'#! ', './file0', [{0x20, '\\&-'}, {}, {}, {0x20, '#! '}, {0x20, 'memory.events\x00'}, {0x20, '+\''}, {0x20, '\\\\'}, {0x20, '\xa7'}]}, 0x6) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) accept$alg(r2, 0x0, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) write$binfmt_script(r0, &(0x7f0000000000)={'#! ', './file0', [{0x20, '\\&-'}, {}, {}, {0x20, '#! '}, {0x20, 'memory.events\x00'}, {0x20, '+\''}, {0x20, '\\\\'}, {0x20, '\xa7'}]}, 0x6) (async) socket$inet6_tcp(0xa, 0x1, 0x0) (async) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r1, 0x0) (async) accept4(r1, 0x0, 0x0, 0x0) (async) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) accept$alg(r2, 0x0, 0x0) (async) [ 1975.884034][T28210] bond970: entered promiscuous mode [ 1975.889854][T28210] 8021q: adding VLAN 0 to HW filter on device bond970 [ 1975.978122][ T1230] ieee802154 phy0 wpan0: encryption failed: -22 [ 1975.984576][ T1230] ieee802154 phy1 wpan1: encryption failed: -22 01:55:25 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)={'#! ', './file0', [{0x20, '\\&-'}, {}, {}, {0x20, '#! 
'}, {0x20, 'memory.events\x00'}, {0x20, '+\''}, {0x20, '\\\\'}, {0x20, '\xa7'}]}, 0x6) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r1, 0x0) (async) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) accept$alg(r2, 0x0, 0x0) [ 1976.048921][T28212] bond970: (slave bridge928): making interface the new active one [ 1976.060275][T28212] bridge928: entered promiscuous mode [ 1976.098538][T28212] bond970: (slave bridge928): Enslaving as an active interface with an up link 01:55:25 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xa201, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1976.168857][T28213] bond359: entered promiscuous mode [ 1976.185446][T28213] 8021q: adding VLAN 0 to HW filter on device bond359 01:55:25 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, 0x0, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1976.337634][T28218] bond917: entered promiscuous mode [ 1976.356527][T28218] 8021q: adding VLAN 0 to HW filter on device bond917 [ 1976.543528][T28219] bond917: (slave bridge884): making interface the new active one [ 1976.562965][T28219] bridge884: entered promiscuous mode [ 1976.597257][T28219] bond917: (slave bridge884): Enslaving as an active interface with an up link 01:55:26 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x9a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1976.696164][T28222] workqueue: Failed to create a rescuer kthread for wq "bond529": -EINTR 01:55:26 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) accept4(r1, 0x0, 0x0, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x803, 0x0) bind$inet6(r0, &(0x7f0000000100)={0xa, 0x4e21, 0x315, @loopback, 0x1}, 0x1c) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000140)=ANY=[@ANYBLOB="4c0000023d1d04000000000100"/22, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r5, @ANYBLOB], 0x4c}}, 0x0) 01:55:26 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) pipe(&(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) splice(r1, 0x0, r4, 0x0, 0x4, 0x0) sendto$inet6(r4, &(0x7f0000000080)="202179ecbced66d21004d86c4827983d6ec102ae843687184616ff1979", 0x1d, 0x4000, &(0x7f0000000100)={0xa, 0x4e21, 0x5b, @empty, 0xb1a0}, 0x1c) close(r3) socketpair$unix(0x1, 0x1, 0x0, &(0x7f00000000c0)={0xffffffffffffffff, 0xffffffffffffffff}) r7 = socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl80211(&(0x7f0000001780), r7) r8 = syz_init_net_socket$nfc_llcp(0x27, 0x2, 0x1) r9 = gettid() getsockopt$inet_IP_IPSEC_POLICY(0xffffffffffffffff, 0x0, 0x10, &(0x7f00000015c0)={{{@in6=@mcast2, @in=@dev, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@private}, 0x0, @in6=@private0}}, &(0x7f0000000c40)=0xe8) sendmsg$netlink(r7, &(0x7f0000000cc0)={&(0x7f0000000540)=@proc={0x10, 0x0, 0x25dfdbfc, 0x100}, 0xc, &(0x7f0000000b00)=[{&(0x7f00000017c0)={0x1450, 0x1f, 0x300, 0x70bd2c, 0x0, "", [@generic, @generic="bb10ac49b6e01b4b5887437b1b833c70a02dda1bc1a8181ff2a01e24cd43c856869b0928985342ba5044cf9293c9237d154e8ada0bdb3e2b50d32498d9e3c642c2be6c1117f98c5d3b02fe6d5ce17dcfee7d9f7d65cfb2143fd4b24763cb726988b32a83bd3b1c31e885e0c44d7a84cecfd946684fc74f869bbf9ce2a1b951cbf759f966353171859f5302ed3fc86f858a09d3", @nested={0x58, 0x22, 0x0, 0x1, [@typed={0xc, 0x23, 0x0, 0x0, @u64}, 
@generic="bb3e165b0ac7bdabef86367bffafd893067668110d1ae7a1bcbe3736938ca4e6eb37c458a159fea7bf2b54452163d92bbc3bc15147171c2bc1293dfa7a6229dd122c8840f74a8a68"]}, @nested={0x171, 0xa, 0x0, 0x1, [@typed={0xc, 0x74, 0x0, 0x0, @u64=0x200}, @generic="8f3a8995bafdc3086ec720af96005d9616771cd2a4e0eb286186722b8b544e288a78ff30a91c334cccbe376d7b25a1f750552333559e796847cba0976b59955f", @generic="a9cbac955ccc16ddd9ea0642da22044866e42c49cf282312d2ed48a15844da1cd8f077f014496e91d86fe88664b1066043945e50861fdf70f9d6923f0bebc68b8341202b1c0b69f821d63121c628bc2a0bdead1de8cf", @typed={0x8, 0x1c, 0x0, 0x0, @fd=r8}, @generic="9bd3fb1614f1cd6c93d93617f0910b6a8dcccff4ed22e398f368e9ec08379ea220f86ab1dacd98503dc3947ce6e3107d462a8f57f69e9a0ad820ed901cdd1e0e0cda5f3708e360e8caa9f17c25a515554f13a885f37a19c95e39d4d0e7858b828458910f31c4255c5cb08271f46592f15f39f432212ebbf7039fb1c2feb0ec270b51bac2ef152d4b45591b73707993fdd8ce2411b1f51bdf37f9e0df6c1ea0f92af5cadf5cf62f5297e947c55ecacb9055a6d317402a94999677aaae918a2b431296a6"]}, @nested={0x11e0, 0x75, 0x0, 0x1, [@generic="eb4d67d1a7d75bc02f3919c92843be72ade48518a7cdce94e712d7cdc1ad5c9eae69223883e0d3842f10014421828eb4bec7b1de5921ee1dbd8bd86fe7a10fb2883cf255a8eca470dd521b9ccf5b3d3263048500fef8d2420e9f803dd346504eaa0a605bf5b411aa55728ec0bb64ab81af383f5ad920332bb44ec7cca44587d90f109cead651dcb70c63c661", @generic="a7342ada65508db7da2dcf8e44b6d6c979f5e95c81611dadf89a8e1f27bc2e59b4565cff40e77f4496fc4d67ffd0f38849f2da21ffbcf169f77d0daa456a35bd94c40fb9ab3c94153f4745215e3034b48a85dd20fe9e19b898ddc5bc61e107727977f82b53f61f09a23602dfb2b5bc11c64f306636aae47d286f57cd68251a5055e189c6d018", @typed={0x4, 0x23}, @typed={0x8, 0x8, 0x0, 0x0, @fd}, @generic="5989fc9f66a535b8c627c8b3c78ca6113c21ee025347e78c7974885d9e477092278896e6c0738330a10b67f1f31940c147658f715c2d5728482b166c4b3379bdeab54eb22b5670e80f48985baefd447f8a6877409dee00de087daf87f3a79e6f31efa05c7b94570d7259b877a3e3646d3065af417b53c282acbe24b229de7d9e9c6de6bdb1abf5f60919086dd5b2d96b750baa52c052d53a0aca65e3e61c222300957dde43873e28820a7669e626cdd1a3e2", 
@generic="bc4a037c5a4e9b298c4358adc01afca49ba5b3235795d1cca1e6ee06be0676d4b6655eb17c0300f198e8ebca41f43e6106d7a5a6071153575e4929c8a3415479c43489cb79ce04700749b9e50895990c02ad6f4c89d852a036fca3f66c1fa2b3998e12dd0ce3bce4f2fe7bd769437aeda0aeb83451d0488dd7603aba6a460893a4f89cd76fdafcd806f10d83f4c12e1fc54f90ac28fe9b9c5c34bedf6c440370f3084e423037b6193ad6b4662c7729c9d9e05686f422206546c33b3e8c2cd2bf801a570b6b2bfb864363a24077b5feeb85b41e7761d0cf089b6d3e5a0118676deaea4fcf2a4c33212379a610bb938361d0eec0115ba61cef4ae383f88d1c25126e62a37910c15196b6b5128779ac02baad19cb649a074635b7ac6a15db1acf53019ae69ba39c2bcd1a08e6bef9475068aed7651aa3e6fa7dff432e5500ff880271bf82e13a1d519b9c80a85afc57ccda56e43227e53167e235c3aa9aec7b0f10052e01656b927b88a316af59b27ee6f2e4ffc5bdc2890b2b3bc067a1f1b9985c76677ed1a7c9c27416a6d9864404ae45ed1dda81941a827bc75ba4fc49e984b1fa7842f1541ba40d140e8e47db0baba8a6abce760ae3fe42bf4b12d6552ecb71b0665acb0e51c167d6d9eac3d572ec4543b0bc63735c1f14c704ab07d0e8112b43aefc498dce1b02825957f4a9b443beddd2fa986886fb3166a3040e87fb307162aff63dc1e29ac48eba147298ea97ba697bd5fd31886434fb23e57737ba27567c2fc58a884d3125b35648d8fc99cc7a48f7a4ffa31072dea7867ef2684b07a33ee9e12f8086cc179f2243c4d3de43104199428c57a30377686e238be3c11b88255a618d76815bf4268e706bb5e920aab5b85158fb3c5f1c6f921c6e6029529c5742b042c1c27ba414268ba895301b9f331b0d86e2fb2021be406e7dc7997e727976e95450988a89c58040ac92e2f6875e6ac4cbc9a6bcda0ab805fa667dbf4437b989da59c6c8f1413b237c19cf1c6c1f689aac9b57de98c81094550add06913edf872b3e6f352506d4879fc9fe08d67bd35a2548286a77364038c338ac2537c62c152acf1e9f35071b093186b1c50a52b835eb60e248e087c9f9a19e6baa17d4c18740292d0ea8989d645d2c35a6ad0b9aa6275a5755cc93f2b04af16407b66497c7ce4cd73295574e963985ecadd891fffb3dd69c7617fa221ad1e0f8f1b172e512534ea83ab92d7af69c37e67bc9a1f1befc95bdb613a091a44b85843c21459b53f4f4270a7924555116b5b0920174bd5d5428bb3f7ae8427c8ba81d338c5fb6fd48a33496bda1669a6bd59373d557ac9dce86b87bc04d701b2d27a1f2fcb55fd0305e452e5ff0b911b7784d70a343fd764b6465f203b1a8dc1435b22b93d39e3a42755eccf43fa196d859b823f3ae19611c45c0ebfc80aad1d1894eaf024fbdc5ae41ec367f86c48714cfe355cbe294a21f0ece615e811af25f5f52227571ad84443c21491eba718dce148bb1edc89025fa9d4942d5ac43a534453e60e95b07e434f67659d725eae65aff3422e5895e2f2f54470998395c5c1d35b65e0f87883cbd1f706005d10f95b3bb78b35b2d47e244dbb3dd35dd12f22d575f3cf8f8677b4649c6e26a6ae969a0e452ff8418e99bf7056605b100a347ba62bc53d1ea56ecdddaf5d2de9c2b074f6aeb2e7426a14355eca7acff2bc4f27d2cf11c992fb93a134fe1aca14fbddbd116474ca8b407b8cb6cbd619ef20d2772e1caeac7e6f43f370ddf09072a3c51724551515fafe1241c3d96808a34ae1f5e72d82b9d69986eb1b4d04ab07dc15664b380d50e6c14888f55fdc640378bbeee928e278d846b6efbe6d6bfa299a4960c2e7cbe42de0c59209c1bba122e4ec698ab1a1df0cd50bc636ad36e48aa34e26e324e15dcbdb62d2c5c60ee8f175eae1f07246ce8119e89ff4ef47d11b4428de3cffac1b1c8bd739d9f6851e7cecab0341f6590871db4880c72ea5b75d3416f1e62fe0d47939f8fd69e9e93755c1a345a04ec08631cdfb8e8ce37fc5dff96dccca60f95e5d0e593c379510ccd01b7fce8485b9da05938f591fe2661d97a22612963a8babd8aedb5cfd006f54d32f30961555269914407fcadf2763786f34324dd2273cb5e85434868f34d59f249a6c8f99079ddb015ac2ac851d45421f1cf19f4f2e5ad83490766f4693090972098ee7e06a57c8b6e3d44e7d4845db7fa4943363c02316b8afbb6d261d9da714a76b9ccbe5a0f92e0184ecf7e6810f9b4c5272050ce0ecc0a2e6c022455cfeff27c056cbce250649982656e1992e075549f737e025cf04e7db3bd97ceaf52fe2ba6a2ed35f170325e4c02b0bee9860b595c98c4bfd152581842af2655b4c008b87875b55dc3e7694f78d3854cf876b8604375338f27d191c24c0f0d30d7d035ba5cb41183051f1c6e0da3b333da56dedc966bb860fbb6cae83c7dd044db5c3eeb6c4fb989a0a1b3250634ff95b3715f369c94eeb
354d6817bdffe0114f82838293bfd86997644b6b47220235ec62bf61d51f632e5beed03525802176ffdd380e6e34d46e50e7fdd3d841ab6d516d1df55fc6904c6534e30922708b0a536224fc521bc240126ca1760ca32c4b26be004ae9ce9fe1d0fb9be7405358c65fef345a8d32499f87c6b776478e021d26e064cb049e1b9ed91f1bd782211ba6ce0d31800128e050c2ae974b9ebafa46e8d51eca2c2b2d069f3334e3eb76c9ecb78fbb5591ff444dfdcc120a9d4a619f9c3fc6b3d9be6a803729fa51f7da530e63b888d170f627c58fb10e2304b4b5d01ade88478473f830c44b6e7dae1aa421b84c225a98cca8a1df182d37e7148efba9f502f7f5b8bb2b796e83552b4dcd39d2829ce7a6bf699dabcb8f36134c8cd38115627ce0608a4962f5c149c48359a756b932dae1a393e8d825f473c47ea003ed0d64da2480db0eea0cc09caf353ebbcca6304d25e5ebf562fe492388fd4fb27dcbc1593e9ffdf97cd844f5ba93326110c5d2aab77957977fbbfcf6a9fce8660b920e514c1c7167fa600b57eca6c4a51620627e49ec7e75e691d60fabd09e53ec997cd624449ca073e96747070be107d48ff9c635b8e702a788614668d86eb07dc72af7b81404512eda8cb4e15d6ca6ec8b3efe74207a38b54d6392784ebbec3c6b0dc2d8a8d65d848498d96ede31a0c54ab142b1a3575747c761533dc92682ba2901b8242518a5b704a9b162e979da47ba808a7f511e8759fff37ad263929cb14e18a11ef9f86c0ab4d7b3f7141f8a50947108482188ba9b54cca223ffb6874cc496fc319d29a39444c4783b9172edb9b0fc2baf10196519053f10ffff2e99e2d2a9aa0664c11142db160723790e9f34f15e65190709856dc8339c17bcc91ec3d037b7ba1a97a3865fe11286cb63d83333a07c86107ad25fe833d399980426022415b234ea28d3aad9aaf87036eae1b60195def1e684af0d256f63fa334707b5d4dae394c5aab80e436fa1a9b19ece9fb211a829f1a39c27e640d442f36a93975156338c8fcf4357d124f5b393e589e96deb0efbb909a90c2bd0378459252a85e414ed6deeb9e69a8cbb5cda07b5ee12658e93b768630671d90f370bc479161a0e6e5a48e9e155a15441fb552561f6453a2f999736bf781d08d7ec78bce43a58e8dc058c40624cf609c26accc8117ed37f1f2fc8ea4f08bbc17cbb9cba9d2ce2cca8f1e45a3b92d39c5a6a5ecd28f12c8fba40596ff26cecc2013943a04c9fa4977e3ec593406c8b2f12081f61542fc806ab195b5df37fd97ac3b58126df40e5a81dc50d612f64df1084e308d853d0dc42614b9810df9bd1f3fd33f5cfe958bd876ac46eefd00187422b19f72c9457cd887f825cec6d2a740b4a199e82279a60ae036dc4ab69adfc334e3d7b079870b3f9cfa02ffb05b5e52dd0c0c641e7d21356341f5388598d826372d7337560f9aa7fba5a9654f1617efbd3142548b833a54d6d36a7cbc991d10e9df084687b65a148fcd28ac322969314e6ba4a11019e6142fcf8327d9433fcb5d236baaa5db9750a2b9db8d397aa14676d0033f35adb97d5373354d18ff60d6572df950694d4d773572842f2e19582c7173ccdf159da3f4520d93a51dc0d11c476084d632a9d2415afb6605e5debb147d135f7d5f8a0e46ef2ecb684f197c4adf30f5704149901d1c3cce526c6486a0e3184d2376246b375ec67b069321276e7fb10acf46f2e5a0bfc73cb68753dd9baee32d6c03b5828054039acf49fe37a551dc0e5026a5ade012a998e4f7db9863016e7d58f2d9a94d9bc048a58dea08f90265107a133add3b92525f5d83766a2f1d76d0a8a7d26de85534f10410cffb4eead8cd5b40757e5c6f587285b57e18821d2d0e3ef0c02b6f77c89624a067a77b81aaa4e3f9aeb0c5c51e8b13c16906fdcfd669fefc4d3ad99f7997c62ca2236cfb43707e9d17d411678eeabb7e801df1c9c7ea41e2caee335cd9012244892f9c4c26c523ff7af145edeb81fe9c9f905f9a0eeb5319d7376002b2bf6c2ab43f583a2ae0e1954cc1ff39ecbc6f59dbd5931598fb9e4310755aad3a82710b9a9e4c47890420b288ec67db645d7a2c61ee488114eb0e99403c5d84ac2171bd4f06cebc30da42ae0036ed31e9b87a074695e4e1aaef7c4535ff2fbc627fabdb44cda1b231fb0396ff9dd0615541559e4b0947cc41c20f109f1353e7d9415d5e9beb8ce0bde999ee24abf4e17b03b8d6b80faff084c0efd5e7805e934ca3291f95414c66f5dd87c6fb00c76979c8467fffba84888e0fc3601be41306ba5711ab9a903d2a430dc00d97f5367e48dee6dd4392e19e3e97fc01d86baae5d10aa6549b9006d0d20e71f73cde7e018485334cadbb7e08f0c7d63de58f211571135a42dc6bee8305b9902eb128da19dd3ff9e7cbfbb5293e6a158f4ec914736e3ad56fb9b12dc10fe965eb9087abcd316ab08b63c60fbf14681abd567d3e746e097a3bfe7d36a806ea1eb86b829659ebb10650796384
e21bad3acdbc46251bfc52079383907414d61b31c5d5521c6b8ab5ec0f62e5b0b9005571270137fc97dc5bd577c6b989f8d6110801f63fd21b2490c8b03b3799700e29ed4f70a4fff87b4a4db60e5d618cfed4e79583c792072b309b04b8f942bae34ed06170489c87b21e563187f8bcea980922fe3d5677666518da8716caadb4faebb5927a7f4a6b704defb5d8927cb96c99d8728035da035857c0246c1ead33df11fe3222ab564698a9269bf3293c28add3c085edaa543fa085d4c448d93ac5ed04b6de698ac754b0a36c9b0b38f6f899bd072c636030a35e29b8558cab7bf4ab50cb2fd9c4e384cf54ae3abb00133685376894af9c6a2af56da61acb5e1744935903f5f0cc84491857dbbb90d19eafc2fd42868ebc7f12d86bace36917a60c10745e413b30ff56eefcc086adae2c3abe2007fa13691261f4578837650e3b9ee4e5aa493e53c47e3d964d18b709f3c94b50d8ed855ed6e9b0f552934bf23f0c7b7b1a1c95b284f586d60ce0d159ab74b5e6e4d0ff40aaf1c908bda13bcec4fdada719c112913d51a7c2767672a3ab6a9e94d9da3e6f61c66e2823a7c5878201795c6fbff2dec827a781765566b984a31166d7fa33ea15d00edf28265578b1ca377033dc6a00b10f97b1316f5486037ed3ad114dd39f0810329780b02eecd7349096bee0970e4602ae047e53e6927b4d031d48070e58c6680a03a05cf5390803fd5b754d23d0b2dc3d50af8285bae21dfe2487b41ecbe56289113fa7b645fadd19", @typed={0xc, 0x75, 0x0, 0x0, @u64=0x2}]}]}, 0x1450}, {0x0}], 0x2, &(0x7f0000000d00)=ANY=[@ANYBLOB=' \x00\x00\x00\x00\x00\x00', @ANYRES32, @ANYRES32, @ANYBLOB="1c000000000000000100000002000000", @ANYRES32=r9, @ANYRES32=r10, @ANYRES32=0xee01, @ANYBLOB], 0x40}, 0x4000) ioctl$SIOCAX25DELUID(0xffffffffffffffff, 0x89e2, &(0x7f00000000c0)={0x3, @netrom={0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0x0, 0x0}, r10}) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000540)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000480)={&(0x7f0000000240)=ANY=[@ANYBLOB="800000001a0000012bbd7000fcdbdf250a800001fc04fd000001000022", @ANYRES32=r10, @ANYBLOB="14000500fe8000000000000000000000000000bb730000000000000007000600836b00000c00090080003040", @ANYRES32=0x0, @ANYBLOB="0800100002000000"], 0x80}, 0x1, 0x0, 0x0, 0x20040800}, 0x4000010) r11 = getgid() sendmsg$unix(0xffffffffffffffff, &(0x7f0000000580)={&(0x7f0000000100)=@abs={0x1, 0x0, 0x4e24}, 0x6e, &(0x7f0000000400)=[{&(0x7f0000000180)="94942c3d1e007dfb8404de29a8697799b1f5d6823a70813d4cc3415c6f862e8ceaac7242aef16f9f7c571f15aaacea204d20b49c43182fe1dd3de88c4a06101fc1f8d6139579492cca024fe7db0bd605ad17f17bfaab7d62fb0b847e05f9c41fbfaf79a513efae1ba322990f1327d42eabce0b83ee4fb2b875a3c4f9a1b2", 0x7e}, {&(0x7f0000000200)="993ccb04b5af9377cad757d9dbbe8345526644635ab0ecc50c5c9b41303e1e1f5b1f6161ff3f0a61f3f51dcf5eab537b55b5db80ddea43032815b7908ef405941077ae8e58627fe7265438edb56ef1b6918735c74b3b8fb318d24c30d06cd07d15f385dfd52cd11a49d23837a38ef8284140bcc827accc91e3fb964378ab5da48352949a0f4b27797b96b083028f2f6bdb579e6c1ea1809c644b8e841bb7bc0eb312d29e9fea73a71744649b830f244576a3b1b8f50150c6379a7ada43987439be4e1258efbf5d325ee5f0ad6c9d909bd73a187d299cd9d782beb7a8b2524cf2b61d2dba7e4acf6764b73c9a034907cdd5b7f547", 0xf4}, {&(0x7f0000000300)="130127c0749a951379b88d7ac86bd00a069d3e5793db16848cac09380ca3c6045e088493f74bbdd96015c04cc03eae1802359cf0a739df19bbbc910c3256b1724713e6e5c4be6c2fd26afc35a60e33dc091785fd017c569eea7264d1416c4ee26bc35c2a3ee4c8f285c9da4f7d78ed6613140dfff54f048b51827b8380edffcbbc154571185532f83a58dcf55a3657ebb73d8a261228568bf32c1e5ed7a414a2f8b30a24d5b952ae26d33311c4d23fa6db921a7464444692273f8476e09803bb860b51baeabc34a7828152bba3533e16df0300294a425b07d1a38b122b7ca71b5dfb620963", 0xe5}, {&(0x7f0000000080)="24c7145919fb2421ddb6ab6620204c0441c838579e3bbb9693a8c127c2c88f33b33f1766e6b65d233d3216d16c5ebbe3342d", 0x32}], 0x4, &(0x7f00000004c0)=[@rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 
0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x30, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, r10, r11}}}], 0xa8, 0x40}, 0x20008840) r12 = bpf$PROG_LOAD(0x5, &(0x7f0000000280)={0x6, 0x4, &(0x7f0000004180)=ANY=[@ANYBLOB="18020000e2ffffff00000000000000c685000000360000009500001800000000922ae83713ab9600010000801b10fb54a8cb72d232ad558c46fff4208d4990ec11ce9413ac30e00bd0081f8504e19a5183d769676520e98a263345e44d5ad12bca35510100c4d86abeb12303ff1c9fe0d0020000d60400000007d3670000008aff66d6b3181ffc1d62a3954c1198bbc4fa13aee48ca9e8969faebf3183fe803ab3f5024b52dc265b36fc9dae00a09404f01f9504d0976d252bd8d24538556e5e57bee3b8cf464ef3c6a7def8bad3ca6e3abdb21696e340bb8e2a093add57196b40def3858ef569147fa4108328392d322ab5df10a2f69a6bdf72ee7944e810d0223917c3d042410f57466f59544047d6d8ac44060000000000ee16c729300d2301800000000000002b5a8b05fcc154ad5290a8cdb97c343f454ff69dd6cbde49b28a6cb5f4fc0001745cff6e00e7ffffff0000acf3209a08439f1ff01779b6f6df7e02aa6d7760525b595fe1f697bc114ed1778e97a3f0395f946974cfb458be2a34cf924dc37b5592bf17956f3547497aba814382ff67b345b677a9d6523d87008000000400000000003fe8613ca29ff92be0d8deffff7b68136b0046d535dd39c0f35408869e9b342b953f91447e6b9eab304f134306320600a44095254b45a6c1312a13696c7202df5f764713504facc532c5a6d44d99ec7530ed7b0311000000000000e54e9072a22d911f4a2c2e2fa806e63c5cd98a8569a6d6bcfb000064885117e2ad910eae67e0ebe380d0f648713e68153579e02d71c58d147b00821ab9a6475b31e1ebf1369a04000000fbf3983f283f2f00000000992774814d63c933912d000006000000a66acb0a38856929e7d8b1b06c9bd5d7e5490f3b8596b694ea9483bd4bd287c83dd998a74694d18bdd8ad0983bc90770bbd26a82b9d99d5fc04563b523c47ef8c33400e90d02000000000000000edf1147a7afe772cd45af8aeffe2753088e02ca6bb2feec446ce7dbce66f0a93a03371320980865c7c62ea4d8f8a864dce9fa85aeb0454349100296ee2dba39c3f6fd6cf96714e11fe03b5062809a7418b165dd0336d226bac1e1223be1c97b15175d0e664beb126000e96549e1a1228c686edb475b705eaa9515c96f4fc6b3c925ea404e0f1de61026dc6c6618580fd6ce9eac602c1756f6d1056712412131ed9925989e01eae489ec7052e0ed72c326c7a8aa63999e2297c54ce1822d14b7c7699a9d0600f11f2e7f474cffbc35bc8623cd5eb68af82275a940be0400000000000000bcc3fbe7d90de96d6a8e9f32f18d1f606b381e4903b500000000000000000000004a2357ba5f6000de1cfa88b7165dcf4f2aaee86d4802000000000000008fdb686d5da2a42e4b5024b6535811f362201d4f82012e6af704973d04ea923c19e6cb723c1923b3eea2d73e176dff383c9fbbac53dfdcb1a68c98e96fe39eec23963faf3ebed3409144c7c53d6318ced678a621450a9b01e9f2772e5f2999d3435da02556e36c3215d2bd4e96c93bff3ad04a82ff3cfadcf65eb92adc6c68d66b11cb2d7556414a86dfa94bb7aa52c7febb1e9b2efcbbc5bccf9d39bed802f4f056976a9a362ee9cc624ec454b90200fd9603f96908bddc14500000000000000000000000000044d917c62b27679913075731e8fddb07c10c82002d60181588ae63a440454287de9e340f611267f37bdd0f2d21cb06fcaf45a0a297e396f428d43371424b307eef82c5d6d19f3ef0d3b8f7fa51957e3099caab31133b34a1d3eebc0f0c9056df2e9667ba0b55695c7894010079b07e7aef7785e2486472b5cba1f3346c1e8e23deb8c82bb6eb2c72c484241dc3b66da78260f800fffd39368b952f6f4a10295c50c887a31d8b543c5d10f2dbd4d0b84eaad43feb6e169a9f2fcff7000000000000000000e011bc6366f56fa787f212c1f8c0f47f50b1e9b5d841ea55fe569bb7bf1e78191c8a02ad436725771738a2a98891971e3b932352896e1ea10f62e8ef7a87e16151b39d6c27575714540d8c293a3fa4b5a825360423c1cbc8b5d19167152823ed853140edda002c16c842b168bb55f6bb713deb57d0aa78d6d4e5fc5be2c402bd246128f41bcb02000000892b135a92e8c844938aa98ba4839a1408a696454d40
e5eed4d4dce481ca86bfac54c330331b7f2cde17cbaeb0377696faf546ecbe742d73d47d726a50f6e752f3325255bd7e8b5923aa3cfb6f7e06494f21ca450139c558000000000000000000000800000000000000000075aa0000000000000000000000005560bd9eb81e839e4992e64b074a66cccccf00334fa94da8477be7d99b558ec6a5b1596ac1e7617c6b32eed0cc70286caf2c5189a103f4b0b04aff171c4d388ccf67fea37e782f025c94c853cde330a193a967d907a8c88fcb033e680f559a72150cb900bafcd536f48797915a2fe9922ce27300009e1b36aa4730117d9b00000000003c630000000000008fbbd11b015c415ca04192fbfb1a8b0e3460af35771dbac10062835c9bab3ad09f7a022c52d8000000000000000000004000000000000000000000000000000000000000000400000000000000000000000000006ec473c54399b7b8aa1ee46132fc45da8292631178cecf19550108b8b8423de42957ffe9bb6d752e68d2bc2ce777a17bf4dfdfee5de0f3e4dadf51ab9562827b762fa611ba5f32861c19dffe1dc9fd5c41cd46cf131fd6b0c2ddad90ac33f768f9ecc70327c59918fa5a249befe98262f53c8182d95f6da3698a6a88c2c31d801a8f1f5e0ce05138d5422da0a6a62b9dfe1f39775d1d0c9186096415f544aaf76b0a1c877a6c826a5adcfb22c4a0e5a46271caa3eaf4f389dd5f3c20dbddc0377a4266d7b9fd61b9287e9b4be0a413ee31be0ddecab0ef7b25cba1fb3654ddf291ecb7768ac1e177042cb4c452fa6b3966950000000000000000c187da23d6855500fe8510b51e13a890e394b84a6ea2cc8d42b97c697c29122298d55e2e1cca8e07abda2606a3f381c64b9fec0000000a7965e4854e8e3572ad5149b3872342dea9252132860c9af1bd5fe263c0313dea5d6e0c11a466d6892ed65f34667dd79b07b5cbdd8aa7dd561a26b5562d4861a7e1b0f48930e0b696ea3bee7eb72794e163d7aeac9a0fa5403ac9cb421eae283b0550f1d0d339cd7b96e71d3ab48ad9d7975e0c9b117f71d3ab80a0c9b0284ecc469fa6181c9c71fce07a6ffb23296a107763138e8d9876291af2076890c47925ac773d95d2ca42acb3e5f3a1550665b898462c139ffd0106bc8a61b6117d252efcab7106b4c3a3c13a70ff452e9d2096142c517b0e91b5cf88332faca5b3ee96363065c3ce32d3d39ec36e20d597e05664f2526bd918090649da11f7299789d00f5024df1e99d3efecb9b457642fe810370ba4fbe00fa60a28af966a27a1659e448bbe43a1dcd2ea760018b57a36ac41ef2051a7b703d55c0602540663016e20d50385766df4dac47802a55bd38dd767ee9960c6daa704fc5d01a1459134d1b9edfde3be9e25a110228c64253588ff420644dbc0854e69a7bdda72f93ceaccf92cfe7dd6296c950db10f6dd8a5ef9b73cf6a12a1ba16fdc7e35b805f4fd2fcff0a623722149c1465e4de2d53f0f10b14c21865027abc71a12cb1e9f8029c7a20000000eeb0d53a83e518c8d2052c08b515d9d0bde24ac4e798040c7db0bb03c019507d6377f3d5dd94a27abc6d6b120d61f772407e0d2cb50d29168b68aef9f176b4c3aa8b21279d4ea9c1f669aa8c2c17d5b3a8d1dda58d26f1019af04b7774c85d5bce8be010f27c5211938031c3404680b01279c778bd1fe1b48c4b5b8e0fe756e54a8d76b7cec5e3407d93b4eadc446440607de844acf5524a4657e33af2115547b735b57b5092d0bc8fa6acb832509abe0882d570ce400aaebd7baff88526608d6991aac95751671174129457e4a03aca69d82b64b89e6ad6ed1e275ec5002e48170e4c7b4f3971481098dedb88fba90770e44bf404d5a97fefe2fe8e459fe45933b78c7ab5fe985a480193a20fb07da1455fb283df68af569ac82aa6dc703e29bf158931fb79f2abfa6ff7eb8c4f381c9da58bea460e2ead969933e5391970ca4fddd64da2e5df9c4d82044068caaaab771b37bb06bbe673056d849825525f1120b2250f6b8520381f7a74b1c687781cb6b23e67b918844b83dbaeeb559ec8520d710dd6d6b4e64838bd434a36ed03fc0c488b24571032ffbc9f8ce97041e1bc4729d539358dc9599c1266b9ce2cb6dd0ad57a6e9d3d4a11a27f70b2934c96237e2ba09c58eeda678d4d08b6da99b7a86e946215afb1b48792fde54492e306cb5342e2589874b603a1de972b1f09cc350096f5c3e814118af9ba0793cfdf20c77b34eacfdf63ce59ec4d2f867bf884e941559b068d908325667672b5e1cf71f4829c0493e8b141489ed926b822becead7a0a2b4a4c008ab16b616d60f347e4da54f06443507efe57ea62399ef4eb11b2f559e1b056456a53998bf1c6d13c92e75136147f91ae3a75ca15eb1b51bf700b3c0bf54bc3745ff313c5e75dc66386897f6ee45429371b8d0878c442ad2fe9baf85c1390da13efc353ccbef950c29f39ddf436f0d9bf1be1515ed251d8b6f11ecb16b1e8d1ed04196e9b6
c2f9e068b7749bb6c1f533e493f22c901662c65cb761dc2eeff2f698bd4dbae83e2dfdc4f1c7f918a00515c1bc189d10ec22b35c92725cbf0ba244fd029c4f026f68e000000060000ab0476c3fd7f7c1e5c000000000000000000000011e43e39d3f4394fbfa13c416b1c443c5e52eea726491ad75100ebad7c6d5a665c59a3fb158e43da904f19e7e8daa4e90390b8da945f6cd78536c0d2be07221f85ad46b180f256d4d84592691d15d65896b66b63a46705338b67b72dc1c3075fcdc5cbffb0366151632ba5be8ae815dfea9fadfd31c473a24a73d3e5116c3023b3563c72d26fbd59877132bde5ca4ef8d92fd3613c768b35223f6fd0b5e9a8b98cccf1e2b4612e620e3a159d6365c9045aaa826aa0ee6d26cf0397ce674c20824584b464ebdc2f3ea26a7aec4570b242a6677a4e9187f8591c3a9bdc0000000000"], &(0x7f0000000040)='GPL\x00', 0x4, 0x1076, &(0x7f0000000300)=""/4096, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x70) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000200)={r12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, 0x48) r13 = bpf$PROG_LOAD(0x5, &(0x7f0000000280)={0x6, 0x4, &(0x7f0000004180)=ANY=[@ANYBLOB="18020000e2ffffff00000000000000c685000000360000009500001800000000922ae83713ab9600010000801b10fb54a8cb72d232ad558c46fff4208d4990ec11ce9413ac30e00bd0081f8504e19a5183d769676520e98a263345e44d5ad12bca35510100c4d86abeb12303ff1c9fe0d0020000d60400000007d3670000008aff66d6b3181ffc1d62a3954c1198bbc4fa13aee48ca9e8969faebf3183fe803ab3f5024b52dc265b36fc9dae00a09404f01f9504d0976d252bd8d24538556e5e57bee3b8cf464ef3c6a7def8bad3ca6e3abdb21696e340bb8e2a093add57196b40def3858ef569147fa4108328392d322ab5df10a2f69a6bdf72ee7944e810d0223917c3d042410f57466f59544047d6d8ac44060000000000ee16c729300d2301800000000000002b5a8b05fcc154ad5290a8cdb97c343f454ff69dd6cbde49b28a6cb5f4fc0001745cff6e00e7ffffff0000acf3209a08439f1ff01779b6f6df7e02aa6d7760525b595fe1f697bc114ed1778e97a3f0395f946974cfb458be2a34cf924dc37b5592bf17956f3547497aba814382ff67b345b677a9d6523d87008000000400000000003fe8613ca29ff92be0d8deffff7b68136b0046d535dd39c0f35408869e9b342b953f91447e6b9eab304f134306320600a44095254b45a6c1312a13696c7202df5f764713504facc532c5a6d44d99ec7530ed7b0311000000000000e54e9072a22d911f4a2c2e2fa806e63c5cd98a8569a6d6bcfb000064885117e2ad910eae67e0ebe380d0f648713e68153579e02d71c58d147b00821ab9a6475b31e1ebf1369a04000000fbf3983f283f2f00000000992774814d63c933912d000006000000a66acb0a38856929e7d8b1b06c9bd5d7e5490f3b8596b694ea9483bd4bd287c83dd998a74694d18bdd8ad0983bc90770bbd26a82b9d99d5fc04563b523c47ef8c33400e90d02000000000000000edf1147a7afe772cd45af8aeffe2753088e02ca6bb2feec446ce7dbce66f0a93a03371320980865c7c62ea4d8f8a864dce9fa85aeb0454349100296ee2dba39c3f6fd6cf96714e11fe03b5062809a7418b165dd0336d226bac1e1223be1c97b15175d0e664beb126000e96549e1a1228c686edb475b705eaa9515c96f4fc6b3c925ea404e0f1de61026dc6c6618580fd6ce9eac602c1756f6d1056712412131ed9925989e01eae489ec7052e0ed72c326c7a8aa63999e2297c54ce1822d14b7c7699a9d0600f11f2e7f474cffbc35bc8623cd5eb68af82275a940be0400000000000000bcc3fbe7d90de96d6a8e9f32f18d1f606b381e4903b500000000000000000000004a2357ba5f6000de1cfa88b7165dcf4f2aaee86d4802000000000000008fdb686d5da2a42e4b5024b6535811f362201d4f82012e6af704973d04ea923c19e6cb723c1923b3eea2d73e176dff383c9fbbac53dfdcb1a68c98e96fe39eec23963faf3ebed3409144c7c53d6318ced678a621450a9b01e9f2772e5f2999d3435da02556e36c3215d2bd4e96c93bff3ad04a82ff3cfadcf65eb92adc6c68d66b11cb2d7556414a86dfa94bb7aa52c7febb1e9b2efcbbc5bccf9d39bed802f4f056976a9a362ee9cc624ec454b90200fd9603f96908bddc14500000000000000000000000000044d917c62b27679913075731e8fddb07c10c82002d60181588ae63a440454287de9e340f611267f37bdd0f2d21cb06fcaf45a0a297e396f428d43371424b307eef82c5d6d19f3ef0d3b8f7fa51957e3099caab31133b34a1d3eebc0f0c9056df2
e9667ba0b55695c7894010079b07e7aef7785e2486472b5cba1f3346c1e8e23deb8c82bb6eb2c72c484241dc3b66da78260f800fffd39368b952f6f4a10295c50c887a31d8b543c5d10f2dbd4d0b84eaad43feb6e169a9f2fcff7000000000000000000e011bc6366f56fa787f212c1f8c0f47f50b1e9b5d841ea55fe569bb7bf1e78191c8a02ad436725771738a2a98891971e3b932352896e1ea10f62e8ef7a87e16151b39d6c27575714540d8c293a3fa4b5a825360423c1cbc8b5d19167152823ed853140edda002c16c842b168bb55f6bb713deb57d0aa78d6d4e5fc5be2c402bd246128f41bcb02000000892b135a92e8c844938aa98ba4839a1408a696454d40e5eed4d4dce481ca86bfac54c330331b7f2cde17cbaeb0377696faf546ecbe742d73d47d726a50f6e752f3325255bd7e8b5923aa3cfb6f7e06494f21ca450139c558000000000000000000000800000000000000000075aa0000000000000000000000005560bd9eb81e839e4992e64b074a66cccccf00334fa94da8477be7d99b558ec6a5b1596ac1e7617c6b32eed0cc70286caf2c5189a103f4b0b04aff171c4d388ccf67fea37e782f025c94c853cde330a193a967d907a8c88fcb033e680f559a72150cb900bafcd536f48797915a2fe9922ce27300009e1b36aa4730117d9b00000000003c630000000000008fbbd11b015c415ca04192fbfb1a8b0e3460af35771dbac10062835c9bab3ad09f7a022c52d8000000000000000000004000000000000000000000000000000000000000000400000000000000000000000000006ec473c54399b7b8aa1ee46132fc45da8292631178cecf19550108b8b8423de42957ffe9bb6d752e68d2bc2ce777a17bf4dfdfee5de0f3e4dadf51ab9562827b762fa611ba5f32861c19dffe1dc9fd5c41cd46cf131fd6b0c2ddad90ac33f768f9ecc70327c59918fa5a249befe98262f53c8182d95f6da3698a6a88c2c31d801a8f1f5e0ce05138d5422da0a6a62b9dfe1f39775d1d0c9186096415f544aaf76b0a1c877a6c826a5adcfb22c4a0e5a46271caa3eaf4f389dd5f3c20dbddc0377a4266d7b9fd61b9287e9b4be0a413ee31be0ddecab0ef7b25cba1fb3654ddf291ecb7768ac1e177042cb4c452fa6b3966950000000000000000c187da23d6855500fe8510b51e13a890e394b84a6ea2cc8d42b97c697c29122298d55e2e1cca8e07abda2606a3f381c64b9fec0000000a7965e4854e8e3572ad5149b3872342dea9252132860c9af1bd5fe263c0313dea5d6e0c11a466d6892ed65f34667dd79b07b5cbdd8aa7dd561a26b5562d4861a7e1b0f48930e0b696ea3bee7eb72794e163d7aeac9a0fa5403ac9cb421eae283b0550f1d0d339cd7b96e71d3ab48ad9d7975e0c9b117f71d3ab80a0c9b0284ecc469fa6181c9c71fce07a6ffb23296a107763138e8d9876291af2076890c47925ac773d95d2ca42acb3e5f3a1550665b898462c139ffd0106bc8a61b6117d252efcab7106b4c3a3c13a70ff452e9d2096142c517b0e91b5cf88332faca5b3ee96363065c3ce32d3d39ec36e20d597e05664f2526bd918090649da11f7299789d00f5024df1e99d3efecb9b457642fe810370ba4fbe00fa60a28af966a27a1659e448bbe43a1dcd2ea760018b57a36ac41ef2051a7b703d55c0602540663016e20d50385766df4dac47802a55bd38dd767ee9960c6daa704fc5d01a1459134d1b9edfde3be9e25a110228c64253588ff420644dbc0854e69a7bdda72f93ceaccf92cfe7dd6296c950db10f6dd8a5ef9b73cf6a12a1ba16fdc7e35b805f4fd2fcff0a623722149c1465e4de2d53f0f10b14c21865027abc71a12cb1e9f8029c7a20000000eeb0d53a83e518c8d2052c08b515d9d0bde24ac4e798040c7db0bb03c019507d6377f3d5dd94a27abc6d6b120d61f772407e0d2cb50d29168b68aef9f176b4c3aa8b21279d4ea9c1f669aa8c2c17d5b3a8d1dda58d26f1019af04b7774c85d5bce8be010f27c5211938031c3404680b01279c778bd1fe1b48c4b5b8e0fe756e54a8d76b7cec5e3407d93b4eadc446440607de844acf5524a4657e33af2115547b735b57b5092d0bc8fa6acb832509abe0882d570ce400aaebd7baff88526608d6991aac95751671174129457e4a03aca69d82b64b89e6ad6ed1e275ec5002e48170e4c7b4f3971481098dedb88fba90770e44bf404d5a97fefe2fe8e459fe45933b78c7ab5fe985a480193a20fb07da1455fb283df68af569ac82aa6dc703e29bf158931fb79f2abfa6ff7eb8c4f381c9da58bea460e2ead969933e5391970ca4fddd64da2e5df9c4d82044068caaaab771b37bb06bbe673056d849825525f1120b2250f6b8520381f7a74b1c687781cb6b23e67b918844b83dbaeeb559ec8520d710dd6d6b4e64838bd434a36ed03fc0c488b24571032ffbc9f8ce97041e1bc4729d539358dc9599c1266b9ce2cb6dd0ad57a6e9d3d4a
11a27f70b2934c96237e2ba09c58eeda678d4d08b6da99b7a86e946215afb1b48792fde54492e306cb5342e2589874b603a1de972b1f09cc350096f5c3e814118af9ba0793cfdf20c77b34eacfdf63ce59ec4d2f867bf884e941559b068d908325667672b5e1cf71f4829c0493e8b141489ed926b822becead7a0a2b4a4c008ab16b616d60f347e4da54f06443507efe57ea62399ef4eb11b2f559e1b056456a53998bf1c6d13c92e75136147f91ae3a75ca15eb1b51bf700b3c0bf54bc3745ff313c5e75dc66386897f6ee45429371b8d0878c442ad2fe9baf85c1390da13efc353ccbef950c29f39ddf436f0d9bf1be1515ed251d8b6f11ecb16b1e8d1ed04196e9b6c2f9e068b7749bb6c1f533e493f22c901662c65cb761dc2eeff2f698bd4dbae83e2dfdc4f1c7f918a00515c1bc189d10ec22b35c92725cbf0ba244fd029c4f026f68e000000060000ab0476c3fd7f7c1e5c000000000000000000000011e43e39d3f4394fbfa13c416b1c443c5e52eea726491ad75100ebad7c6d5a665c59a3fb158e43da904f19e7e8daa4e90390b8da945f6cd78536c0d2be07221f85ad46b180f256d4d84592691d15d65896b66b63a46705338b67b72dc1c3075fcdc5cbffb0366151632ba5be8ae815dfea9fadfd31c473a24a73d3e5116c3023b3563c72d26fbd59877132bde5ca4ef8d92fd3613c768b35223f6fd0b5e9a8b98cccf1e2b4612e620e3a159d6365c9045aaa826aa0ee6d26cf0397ce674c20824584b464ebdc2f3ea26a7aec4570b242a6677a4e9187f8591c3a9bdc0000000000"], &(0x7f0000000040)='GPL\x00', 0x4, 0x1076, &(0x7f0000000300)=""/4096, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x70) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000200)={r13, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, 0x48) r14 = socket$inet_tcp(0x2, 0x1, 0x0) r15 = openat$tun(0xffffffffffffff9c, &(0x7f0000001f40), 0x226e80, 0x0) sendmmsg$unix(r6, &(0x7f0000002000)=[{{&(0x7f00000006c0)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f00000009c0)=[{&(0x7f0000000740)="0b8ab99004e3f31defdc48d3025abdedf7e1436310ef51cf44ce8b14ed458eee091eb0bfa9ec6f0530606ae03baab7085baef5fd1ce24e4479139dc4dbf086dca9fb731ce0385a7d9bd14d72006b8e72005e1e71f41dbd53335c37b2ecbe7941cfdc3168585a4057ee4cbebebd0b2af6fe0f5129031279c764d5097dfffc950cd7ee68834359b30362010e88f39689a19c66eeca0764ca6ef31cc6cf5870f9995f9eff9e07879fa9966431854517a5812b3860e1320552927cce5a5cf4bae4b92a99937e06ae1d5cfd71171d362cf2298d458b1c35eab25d61ef4f1355", 0xdd}, {&(0x7f0000000840)}, {&(0x7f0000000880)="c5d23dc36fe0e908e0409c995a363d052e8fcb37f0a7e11440e469571a44f62eedee9bc74f2a9e8f59bd9451dc2ccfcad04beae22ddc912d0ebf78d2124f1d3684a18002288e5b23fed6c08d8e", 0x4d}, {&(0x7f0000000900)="c66b6d33a8857c4f93fe523462d8782c37954f8bfca42bfb0e73762788d9bcaf89401118b776dcf3cb8e876a8fe7adcd3757e11d41e7aa", 0x37}, {&(0x7f0000000940)="c3ef119ec808f72ec4094b679fbf1aaf2cf38e8b39cd24a43f3c7fb2c8f302cbaf342da1680f6165a3704b47ec0e6e18b3e48aa2225c7b606b86e4c6d0cfb77c8e5f41242f7d0af8d8bb4d53c7588e4690332e2727fac37a7120595946460de696bd55193672d1", 0x67}], 0x5, &(0x7f0000000d80)=[@cred={{0x1c}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xffffffffffffffff}}}, @cred={{0x1c}}, @rights={{0x38, 0x1, 0x1, [r1, r6, 0xffffffffffffffff, r6, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r5, r2, r4]}}], 0x98, 0x4}}, {{&(0x7f0000000e40)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000000fc0)=[{&(0x7f0000000ec0)="42c9c677a79354bd03351dc160ca532fc656c65e8c3fc6f9161ef0a747a325aef705cebd83fc08e9d9217d673d71e27dfefb27f27c24c200297385b42d21391719ae3de89331cf25463aca3dca69db5722b556b8d830e2f86dfaa65c22112e8d34251ff3bd47d72b491c190ef2d35571d9ea7613a3585a49b4d61abe3553d69bc7ea515f3ef78ea2256e3abc70ecb82162cfddafbf8a49e84c8ee0647c67465edf50e9cb18e09d6d2eead989a72e68c8382ad18e3ca115f01e3debd9d2d0429f275c75", 0xc3}], 0x1, &(0x7f00000010c0)=ANY=[@ANYBLOB="180000000000000100000000000000", @ANYRES32=r4, @ANYRES32=r3, 
@ANYBLOB="2c000000000000000100000001000000", @ANYRES32, @ANYRES32, @ANYRES32=r6, @ANYRES32, @ANYRES32, @ANYRES32=r5, @ANYRES32=r2, @ANYBLOB="000000001c000000000000000100000002000000", @ANYRES32=0x0, @ANYRES32=0xee00, @ANYRES32=0x0, @ANYBLOB='\x00\x00\x00\x00'], 0x68, 0x41}}, {{&(0x7f0000001140)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f0000001200)=[{&(0x7f00000011c0)="328e8616730e94d95ec24b9c6f8873e178bfada5b3e6c050c3d8ab2137bd5e89358b1d193b640f72687e499af2d4c7e95b066e", 0x33}], 0x1, 0x0, 0x0, 0x20008000}}, {{&(0x7f0000001240)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001540)=[{&(0x7f00000012c0)="3722910fade2a52d513917386f047c684997907911e8504d352e3220d07a2afc768b", 0x22}, {&(0x7f0000001300)="90b1e1c1f7c02e84210cc0f3866836ab070f6333119a4ab800d216a17f448ef4bf99deeb7271cb1780b8014c3fde40f2d14a07bd146b32b6d9a0a072913beedc22f5de01dbd949771ac9525b71d4379d848c1990dd758c74", 0x58}, {&(0x7f0000001380)="c144ba1bb64838e8891517e307fe3bbde227c5172dd75585cae80b1fd6a3c84cf520513f2e2ec703e64683782d98c9d5f58e6c2a4b5f5a92cf7aae65462093b29b09f70cd08153a7d78ae7c5aa9cef48aa8a4315eda93fe4d458c94038b9fb2f279b9401d404f4cdc8b32e128aa4088afb27976308a4ebe5fba77aa08d13eba68027ce1279119b13fcfd8aae88a348", 0x8f}, {&(0x7f0000001440)="c565ee8b3a54b1d16810dfa319503e24b9bbbc78c23d66c12153e4df62d8bc649b4d83252d4481ec5da2fead7befd8e4bf339e22f768ceefdcdaef63da61fd2e0d2e05a918d2ef89726ff760bab4ad3686bc4a267b1078dfdf2f81bc6df3d2d4c6a5d7999c4d94c478a6257d823ad13b7d6a9c474a44aae5b9c8a6e2c6b31e91af0b379e82856d4ac18c49c2580e99221b868fa0404e15893353a8093972dc90beb899b5bd6f70838968a841cf1d5491ed8be7c863", 0xb5}, {&(0x7f0000001500)='?', 0x1}], 0x5, &(0x7f0000001800)=[@rights={{0x28, 0x1, 0x1, [r3, 0xffffffffffffffff, r5, r4, r3, 0xffffffffffffffff]}}, @rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r4, r5]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff]}}], 0x118, 0x8000}}, {{&(0x7f0000001940)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001e40)=[{&(0x7f00000019c0)="e9e32c41e4161ac80cf71fc8619599e7e3d7cf606196aa75158c56c97f1244b4683f934607e09d391d23148500dbe32cf66c3bc3b3fe4f8ed5e8c2b7c0a735d8ae508fd66c7a7014604208ba43277a4ca6a73dcfb845d2a4929284259e4be16a3674b05f189a36616cd1c534f3bcc344006a514f1627465f8e4487e6416402d4", 0x80}, {&(0x7f0000001a40)="f29f50bd9a9d9d77a0c84ff27e307ae4b9441b8928b2a4ecea8559bed6289e0bec18ac42056f773607516328b869d6e315e7d0911b3041d57d8d54868b92de98d9dd133953d90e4f2b37a923e3524c505a605f57e9d3239ebd594b3d8c7c3def9ecd7977d8f7a77d95b37239cfca0fbb437cbd6c23ef368978c89d260500211ac9af1ed37e18388a33f76a4c0b3f92a60df65555eaefcd06373ba1ce5dabbf0c8041", 0xa2}, {&(0x7f0000001b00)="939d80c0e28ee263596d7815f32a55119cd251c2bb70a3e29c0f4cdaf717ee454d34169b4368f944f06b995d2eb11ebdd2c57d23caa9d657d5cd666d93b5aa99", 0x40}, {&(0x7f0000001b40)="18de3d1a090c3248c3c138466b46d981f5961f9b44e631aa8e156483887bccd733d1cea050d28fd1447a8ef885d4886cc33dbb0fbe81267452e02cba175af212525be34eae59267002de2a11337cc21f2715b764d8f5a51788d8baf9ce81e54df913134bf123c0ddc26645bfabfe84d7c34f1b3d11a50646e9766a784c3bbc381f3bc17e7850a425ed259b28d06086e6c92eeba316cade95f350d26854220cea30696661edba53d1", 0xa8}, {&(0x7f0000001c00)="2eed2d06bc4a7c4cb6420577c3f7347ed4943868cfa5e038c5689196045379745d3a5799845c", 
0x26}, {&(0x7f0000001c40)="a34d25b014da5df8c04869f6eabe60886f898ce27ef40b830744f11225290ce8e2281cad7d7348792ab5f2", 0x2b}, {&(0x7f0000001c80)="6391b92ea42ec8033e2bc025e0cd282b23f182489f7850a35f72648f5b5f82d0794b91b021f45e8d64f358713d8397db07a64ea062663ebfeba258c7dd42b0ebb807b566bef62d97b2d14da9de17b19e3c4d85230a86899d158b7e0441f613e3d8fb654346f52daf1e19881309cc8f5d15e7cf27c5b37c1323df5d497625a01342996e8b8abf8a0400922bf028a6bd79f998a6165c913d9a68a19847978d372d57a40cedadf47dc3e2ee9995f8897885eff2f3cc532dae78e3", 0xb9}, {&(0x7f0000001d40)="7d32f4f3a02f098e56d123568c67a8b7d72628f01dc541a58134d2ccf04c55e2d42f38cf4aecf5b308692cd14681e8f7d2e1bf88e4668890a355919dc8899b3fcf25099076182875cf2aa3d3c83368f7bc2ac97f88c13c783b256f20747fbe5ea6a0afb70d874bf0c1b56b7bf8710e37e01e1af2735ce8a1fdfcec767ba9408d87d9c7ffd44b96b93abf95efdda9751d44010d5ac10fdc83cd2c89e2424c5b5052f7f6eeb84559b76ccd48c1e381d25886368d327e1de296eed50b9232a79555ea3c9d5dfe66ba5ff90a2d0a0ba9b4e5b63a635b22011b9400dca0b7bf17ec1d037a02a0c7a8f2cd875e6f68ac9b1d5a67bea3ee913099b20435cc", 0xfb}], 0x8, &(0x7f0000001f80)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r11}}}, @rights={{0x20, 0x1, 0x1, [r12, r13, r14, r15]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}], 0x80, 0x24004090}}], 0x5, 0x40008804) sendmmsg$unix(0xffffffffffffffff, &(0x7f0000004000)=[{{0x0, 0x0, &(0x7f0000001540)=[{&(0x7f0000000100)="dbc4f24b18fce59e560c179fe1a0e7a051977ee0c3b246821ee70cc5b4874a0fd7b5f20706de65f57d085571d3dc21e249b5a0a316cf18dc611fcac9d6cf65fb73973e3cbfa874ad40e619990b0489585b6b1bc7ece2886b5332bb7543fd4fca01d56fca29c82e8d042e615c7414cbb846a2d769b81fcca44b7a9e2c57fa5cd63e871cdc526dc5f2ce1d46eeed22360d31aec0cc3b6cb321f1097d2eb11616c612f9abd3f9363c20503e8f0bdb622b4369e186ebf9592ab60a7548f727ca152eeb0e362d61c62630c1", 0xc9}, {&(0x7f0000000200)="9b5fc994668022390ede555253a754f67eb9585b161ca1030b9a4a0e374e25bed85a93013ed5ddd7fcce7bd5a58631904ae97eaaa80dbff37cd10cca63a3a6c296391d764f28fac76c13fcc8ff66c88e54c8be0c7f4704845653cd78fcc7bad2cd42daa117c886dc13fc8070a7ca47cd612167639d7e0481b0467cd46233dea07ba7afefef764f38deb3bdcfdb26cab03e340b66d93de2ceb463fc4a079b4256de036a307673c90ce0a65e54c6e5e43b15c6b98330d72031b0e9b034d308c7b278504d7826088114da1c1a8771d42454af2e4f8b814c23a3909a31cbcb4194f39a574a9332b096d2f195a0bb64c8f90d0baa107168130772ba501a34ade1", 0xfe}, {&(0x7f0000000300)="92591ffaa769533f6a3b214975b30d0de86745302fe06013dc26ed63edf67b4dbe25c9dead1070a285dc22a69e9717901cac58309aceb8f39fa017bba29ae63c2a8af5ca56c6e040e84ae7de73c42e26ecc4bea395798df3568de0d431606f0a03739c6feb8166fd355c68d86293aa9a78e696acee93f2df127893649983d97346fb3832cabc8453dfbfefcb5c0b6b63dd718ab4e1c16735c35114dcf2c6ac5026d45cfe581da580fae61a311360f7de281241e6687bd0ea459cdb5c1b56b71736a90238d07a3700c9c512549dd29d82dd4cc10f654540bd4fca5b6d8a6e2f583886aaf79c131085190585fd86b7d8f0ea214c68a27b", 0xf6}, 
{&(0x7f0000000400)="22a4e9c5246d22d7625613407cc09286f565af4b0e143b536898a61683bcc0045d093f8f63a12f43f598f4f0fb1ccdc7fd17f7e11ca892648538c38a8009a7dfa62c6035aa7279bb601539b786c8121d56aa8d049ac2b152597503008428a666190ceda8725f8ef0812b3ec6ce876c48a8e1882827df28635e8ab910ef8c01484394b7fe206c042e79246c8ac690c05ae71e1a952acec3f1f0a445f1bd395660a92392711fa1a58154c0b2aea92e7b328c517aa5811a740f3253a402dc73f94e73e3e2ece62d3ab9ca2a0a6d458ecc6ab643a6a4e8b7d87e9a47813d522b7d7d9d2846323e4e4531cd3c375e9e23f37a1fa581383aac63649e60e7d96bc87381630c2040dd95db5b1b97bf32ae5c66f974e98ace003a33dc5a922e34712daa43bbc86e76fe866589cb8fa1282b5ed8db8ccedd64e312cf7ecf76e517938cb6ff93c8598bd4b7ffddf7b7e9595c719f489a0af55943b51c892dc828412a40192d52048b494590550a72541d6d54c393e1886281a1b57f47f536b15a1f27e26392360ccf7ea4759d4e2c3d37c9819211dd6aea334e15b189b086aabf3a42b9306071a3c0b57c66f94de6bafb0cd1fd22a048889656a0458d693ff8562c3c9c8409126082db148b1de8aba23f2569daa955b64b45769680a6204f77189d5f24a044fe1bdc297d5bfed4b7cc113ec1a50a3c6c6884519ea12cb6bc811df9c5d3e6458efb9747e48bd238c19cb8057febc3599c94208138bcbeed9c01003a0298a103d5d004e0851870dddf9363a86b71f670550f6641148164b24adbf4e668e8ada237fa594b943c4a5529c613f82719a5bc84914f1063c5a121296059641af891c2ec84cf1ac6658c7fe04108a502bcc70e93014f9a5e81e4bda3196aa02915c8f0ebc8f1e442e5c33d8e2aa2563bf4a961aa2dedeb2636da2286efc7b8d32971b80aca20ddc08e6de9c6c062450355d7860affa75a772db952c3eabb7f3578643bac353108b52e350f957dbe25b3a8e52cea9a89aaba3def7c7365b22fb16a22ece96d4a65e39aaf2045bb01de2d207e886219b006f8414cef79fdbecf781706b45fffdaa3f2442280776e2fc7284106739609a11862589a058c659f95797839363c7e8006af72758917a76e45c8cf0358467b74ed985e960b2460d0b5d3e8ef94989c42a4905ab231ee823993095c62cc48b93293f8f8a0e9b611a07ec19a9a08c8a5d12ca63e20bcb9e89307c081f105ab7752d054fa41e7120e7679d3acff82d702784e20c30aaf4d444195fe02f054bfc82cd4f7116493b4734136e2f1f33c2a03b8c1a32513d1da6f49242a72710cc200246393dd189bb2ee028c861c82298a9649ecebcc382ba6e0580f42c85aed9d2dd7e788fd246bc0ef9ce607173b1b39b9192d20588b1bb9aa2f34c1c3488dd61cbc5f176c4b36ca04f644985d80f02c22ce2d6df18e33aa740990cd9ab84f3df6557ae980ae6278ce9b6b63ac4a18d319e5b5a4804146a7cb8b086f22cef08281f517875592c5003044ce6a6913017323a5f24997334ed8359f925bde255bfc1deee146bc116eb20c7cff66f0a2f1c8df00171fcb3062866e17206a84f4369de92709e8642cbdbeb923969f6284c7d6af58d8e9c5d7207a75b7e1ebca1c55923d438dcf8575558b422baa7eaca5284f1814c11042678d5ac76c684f88ad78cd79141b8744473d3233d4631cbb355e90921aebc8205cf8ddbb9404494d2b152bda30a76ecf8adc44118181240bb945b58e1219cc4c3bb3956246d8dab179e64826a54064fa7b110a8063025d327b5357f6fa7cee845611f62d4af9996fe48bf0d37d455b2744521e66f33e0832d564846eff0ba91e9043e037f34f1448d934b35cc39ff293fcb4ab5afb3c72920fb85dcce7d8b13676d780020f72eee9f2751a242a9e339686ac4b960517a96293845ad7c1e125693c88dde2d3a4406c6338fb69f86370b9d0bcc725039099a4ba30b51df0d67b5dee44642dffeb65a2bbcb434b9df0377fd0fc814a2b8bcada5eb4f2d5e2d3e782b663d7c4d53506e078ede182ac0cb9b12b1d9ffa462d03e990761e9fa6ba5aa7010025c63a95e40ec0e0d5e3636ee50a076e07533567b049d00218a328516b797fd82185a780841560780ee262092b30dfef26d79928caceeea3e900e8486678cc558154a58d75ce3e3e537c5a8d5906200900f8bad0151c996e2139c068129352eb3276763606631f72925c568709ce8a50327c1dfb66506312eecd8ae3f87eafb55fdb8fbc00ba1fe6d5192bfe44fd6c40dc7646a8d06fda1e98e952988fc2c930215427d2b162b1d17bb051aa61d3cbb556784e36518a853751e01f9de3b1007888522d73b367943cb2aa34772c0e22ef74a9fb6a5578a42feee83de6886acea93120dc876aef365bd1e0b62990f8f0697053c43c26a9d241cde299cbd8ea13b18cb61bb0b1f8202d6045a98e1f8665c0b9485cdaf769d596902dbc4c34bd2b8
1e4be342031cc9f62405b30f8d6e1ce0a1a4c2f252c293eaf9022a3734b92be39b79340e8b07747bc1dfd1c6f8dadb5fe8855b7c149ab2dcfd06d6722adb6fffdc494fd031d4032c44851e02e389d7097fdb4f89b41bfa8d501a57522d3448120b37855f8addf36ab39930deede738fb936c2ddecef7c0e77dccf9619839b0f636e2670e017b4facaf2152eadbfd414e8971593300487c736407abbfbc9f116abec4ac7637f2bbf399415801230df66acd2838f9f05c21ff2089a82e6aea2e5a3f1841cea5348eebf531987717c0f171940373786e9fd74913292c1dc92cd81ce4c43f705a989e2cdabc5450a85a8a6dbd68f47b4546c28d54b501c25ec0dcbcaedb934c27db33418e12e1ef4cc05b9accc104afefb90819237222f187a8c45a5cff0823606a5161f323d2f5d2477af5e33fec044d241ee655cc3b877178eaddbc4c3ea9ccf0a8c02b333dc4d2db4240750f9fcad4dee434a6871a24ca7b875911da5347cb1a238b6b864afa053b334a8f84ca11fc69c8320ac21dc7366aac8110ddb5053bb2eba61f3365df020a3a40f5c28f785fcb1231b03b27e414112bf475e0d4b9a9324fa5a929602c4ca5342492809462eb3e33ead061ef97890df95c5876140905a25a8f50c3499c7e41be3c21a382cec592d6786a8a5b11633463f396d6709e9e89bceef42843d23b05d82d8479a7949902fc133aecb6228bc85202f2e0403fa1be9a8db8080c849330ffd4073e9238a3b21e223011f14a65e9aaeebcda997b4ee32eb5372ee4bc0106bf5b5f8942ca8e0d9e7e301cb08074738293b144eaf723cf26f4fab0652d7fe7c4f6723219ee205a4a01c2f7080100b583609c685192d7aed5083a69969782d76c38fdd4dcb61a6ef51c9d95bd7fb3212ecec3b383552970e58a11dc99169201ca95167c59d99fee8fd8bce5b590ba620a168a3592c15c62f756b6de4e1b7deb1a9bcfc57a88d5a3cf9fbff07b9d6094b74f6c27d01f30eea8e7741130dd7ec2099cc87654e2f0c689c85c5a24e1c63f37e51e98a1844a153f8599cd75371ea482ed74e01970e71b0d10e6afd7f0615c7086f220ffef9744f6856492fdd93082232f3b3bea309bc3bdbb175026777eb19b3eec490d477d329e0353c889191b576b3ee22042f81b162c41de634cbdd8d913942f8f099d532c2a792a947d68eb0ee13ffcb5b0304de64c8bba0c72b3ee5c9473d2058e41b963abc824b4e644c15c5040219108415bdfb7f88a6d5b1e305721b927f5f84d106bc0f830609b9fc72e27020eafc7c07948673b564d640a8f8cdf76d1452626ca38aebb1bfaba986c0cb28f162a0077a030a79dedc1030896f5342c76130d3593092f2b25faea80c893df46c42649dc88f920a3b991d877481b531a7a8d8b90a5dc7ca90bc22936ec00702da7ae01015f5c46efb0b1a189483aa908de49a9b2b326b0a63a46cc24a5a58113179683eb1dcb2fd6f7542a76d739de7b955f34efe2e6530e7e50eca4b92cfdbcd26f17d2da53ab089dbee9ec4d7f5af21552cee181c805206a123587b3b106a81dc72392904f3f6555d0b709214d11d4104cf551eed8740483c69105f4ee209bb2644e21f3d785dbea25ff4c3e6c3b0d1abb1c47247a94ef276f196c14ffb271427ab0764abeea0104f0f1000349b1e856f9a6eb8fa04d51ebbb6ae3e2547351a8fb335671144a71a223fbbb4dff6d4ee767bcd8167548d1f7746895eff32e320142c927a48815bcc0d37aadb1f752a30a477b734d6e83327042a87a7902a06996d4571b7f35b5398ec162d398f6170a634a6f5d7446b30dce0edd2f7452d668ce48c28f5672c67096b5e43cc96ce4ed4af7a5c4c9b6b293f838219948e0c6a6f1fa4235b67cbeaf589332b2d3f1021c5573f361e809d934c602e5f890d06ccfe22ed3a5fc8ed8ebc0421296e43447f8c91da6bad796d4a3f843b9e78b446c5d1ffa8d8125ac838b64ba3e0fc7801d1ce4bfabbb5491eae994633f0aa3a99b2d3d6db3276d40bc6352742cf2353893d2004c081392652804958259dc1ee479beae1729e4d1dc3bf75e6e6f593076c84557374cecba1cec7f269868bd17ae3f9edfefa918b7de6dc33f5282a8ed11bc5628098fe9936570561d5c7331cbd9095152c217f4bba4a9d98fe45d27f7587981bebcfb21542de0c1244959dee05b753d2463b20f9e1be490dec1176414a88f4c8bdb8e658875adec7b8136bcc0a8a3b90384c43ebdea5a6c25c0af79b49bfb41247d8af8023315ca6181da2ec3542eb9ba567552ce3ef999f6778c2e2349d93b198bd196ccbd2513328c66b84c65475f8f5cf8c486076dcbf812ca812169b001ac54ea607778aaf1ec516ef72cc54cef8c0c82861f0faa78a87e5f8c8a0eabb07ffc5f44ff6261167587fbd0b33be0a1a923eea862d1ca8de55c8b8b2920c899fb937908bd45d0bfbdef74543761127ab9a55940c43fe8c6a525d4a72952bb5a49fc36dd834ccd953258bea798af82fec0e
87fca987d7685570e6c768b8dc1504af9bd97a4f9069594fac84f8c7aaa228ce2037c459eaf92f51004a7ce4d9a6ca4e14bc874d6eddc053756f75742da808f54f322edae326ecfe62ee8a4234bbe008ef801ad411cdabede3b287c64f582157b890a2db76ca49bc4657324ea6e91f419b3f9a9814cb798d396f080f9a29783dc0b2809832dcbc916b92015214282473511b28b389b560429bc0361e800a1fbfe8a90ed362803952f1442f83ed192637f6e7a8fe834608b8e66e05ce6de8ddf53a25bd3d85e3158982ed014fb24a9bf5feec63f3775b2d19f4406cf857de88479249ea540d14579fbff3be4447df73a6560bde2bf298d29d4308840c47fe8c80d13942e4fb010095f4ae3e02c71934ad90c123fc0721c17b0ea7b6b053c7f7764de8f63d0f064f40443f8c654b00169c9debd89321dd62b5ddd1c560ddc7a5c5dfa03a6e29a2d3e97e0441903a9028a1f3e86986e044fbb3327fac1bd749e701883b3b6472a9ccf1c063270f5e6a44f10acdd281e8880edf5a345ceaa2782f5b61942058e121b88b037bba900463e63023193ada76f995d0fc389c8c8ea56170b63dfacd5a4c126a39370af6d39252f76e68cd3f30099857f5dd79716f89bf8e8d22cb989723ad807bc9a3a2611e819b0ea6f0908eb80bdcf406b355c5aa406b81464cdad037dd59c2065708f972a3d6ee9ac175032cd96d8e31c013133fa03cceb7fbd8d27c20b8950ba86ec74ffe5175212f68f662597f53ffe2fc0de2ab729716ef40a4e676", 0x1000}, {&(0x7f0000001400)="3ac339f3812e07d9bd54490ae4468588b59d8df79a6b31aa2fd71f8b2fc0ff4f66f01582676832b8d506477dc75ed4f78e39c0269fb09d9d6f40ef35b5e17a9b378931dcf883da028bcbcdcbf97875b82ad1b287396428ce27bd7a17da0e29d61836906cc74418e9e28f36a2752dfb79efe8380c58366352f31d0162f7f7bf877ad1476eae51f1d0120b9b0639f61cc1fd594ff5ea0a881144430979c6deae282c1f5a8fca5bcd27bbdf6411b0e7f083d93dccd95b424d8f3f226d4b4a776ff6", 0xc0}, {&(0x7f00000014c0)="e91e9286a9f9d68f3079791af825d71318328b80fc6e23f63d7fcab2bdb5ad2d3977962376b57f1eda3407279bd91556a4443976325eb44b0cebdc6d544f9447a9d7a95ffb0a679cd2ff6acaa0b892a1c13b1478f22dcb68a1a9a33a44cdfd7d009df0770fd52270f22873c8835caeeea10239a1749dd0074812bc0d12fa", 0x7e}], 0x6, &(0x7f0000000040)=[@rights={{0x18, 0x1, 0x1, [r0, r0]}}], 0x18, 0x48001}}, {{0x0, 0x0, &(0x7f0000001700)=[{&(0x7f00000015c0)="1d", 0x1}, {&(0x7f0000001600)="23d7500162489addcdb8e0ef36f6fe3631a7e452580c32098cf3313c0a297961ad4c1ecdd9d462fd152a776ca2521a2f6f368d2d3584b567f7f95a44d15bc493e6f5d85a922dcc185820130a12ce988aea2cefed809c001c9cb462ab14dda1b20a7f5b886ab61b021c46b00c83e76de05ad5d080775423a29bb2b025eff86a6510a0f44d3c9e2d8f9474a82b2f2436ecc704b9ee0f4da3cc5bb0d0ca3e3b34d73f53df66c3ed6d57e9e0e220aa143f881459f0abd5da9e792cc17121a9b20545982aae396830f842", 0xc8}], 0x2, 0x0, 0x0, 0x90}}, {{&(0x7f0000001740)=@abs={0x0, 0x0, 0x4e22}, 0x6e, &(0x7f0000001b80)=[{&(0x7f00000017c0)="3261c866bcef0f0738fe676c4661013f5ed839e583c3f5847fd2565a1f2a2689a5b66391c66e9fab6d6c34558180d981d3ca9dcd0847df4b78659e635fb959a089f96860ebc076cb5019114bea8e0dc80917df90c5aef5d32e13b24a3918f9034fafe06db2841583e3fef80fc400180d45a62682f62b41a33a5fce0216d27db63f764a972e51944deafae86ed24fa66b1913fa", 0x93}, {&(0x7f0000001880)="ddcb551c314ca6e28c135a5d99b874ad7e3fee557098b239914517f36e429cd44b89b30147952c74ce74", 0x2a}, {&(0x7f00000018c0)="4c34aa080a1590198de21f893039b7cf58d73ce929fe5e1b9b03ea82650b17f347ba96f9bdca37d119261c5d5e6e770cc1f68ba8851ab3b460e25df10682d398d15a578748c8422669aab98629a01e3e35bb0b8456b110a8c66750c99d115d4304863c569646bae6cf4d4cf58f2a4e5a9ed6936b1cab0a5cab6456a24a74fc3d665164f5ceed7b9e40d8fd8a60e89cb67572e516c9bf2449840c85066f78bbdb68c2103a46fb825a1f1c79b2cb205aec986ed0", 0xb3}, {&(0x7f0000001980)="07d549102648b2eb135e07712c223fd96c06b3556edeafbfcb697cd4397b0dae9746073bab2308692f3e7148707df8fac453bf79db66427b5257f3fc450943d3d54a49f344665d5e6f13814d", 0x4c}, 
{&(0x7f0000001a00)="139e2e3783ac11cf91fb0815f84887a9bd200f75164cd5a53255fe80645a2356095833eb1b7a3ea507a6caa36ca98f6323dbd8c56662ba8aee03ebb5ec25afdf71e2b76e4ff1aa94d2de83699c9893bb20089506d06da6f38189ff21566520c4dfc56998713c2b67173d1eb4252fee5bdcdee49408b57dedd1398730982e74a73ae875041e4bfe40db1fbbbcd84388f3bbe0f3f811b591bcc2b002736582ed4fc7269090e50dc46448b2a05e2cb795b91952dd79494203578283ecb3060d7b6853815c56c9eaa81c7d88e0664e50789ba578d0666dae0e67a9282c543c4a04cb26c0e3", 0xe3}, {&(0x7f0000001b00)="732d1f48c004203e43ac0bad0f8cd773deb6f15bbf097712c9ed41cbe40e490b52ac0385f109ad8f53de43e2a0440900625145db9337678c9571a3a68ee1c4bcb8efb92c31ccab4125", 0x49}], 0x6, &(0x7f0000001d00)=[@cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xffffffffffffffff, 0xee01}}}, @rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0, 0xffffffffffffffff, r0]}}, @rights={{0x24, 0x1, 0x1, [0xffffffffffffffff, r0, r0, 0xffffffffffffffff, r0]}}, @cred={{0x1c}}, @rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xffffffffffffffff}}}], 0xf0}}, {{&(0x7f0000001e00)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001e80), 0x0, 0x0, 0x0, 0x4000000}}, {{&(0x7f0000001ec0)=@abs={0x0, 0x0, 0x4e20}, 0x6e, &(0x7f0000002540)=[{&(0x7f0000001f40)="e8d2c3aa1df1032adfce5660b682c9124dafe604b8e4a2bc2ffd7252deea621647eb8a534511086f67b5d22172fc11888022981b8f991e38109db1789767aeb03b2f70fa8e201198850be5448582a6d66563c7744a238044c526281d4d9450cfb083b967afe305d7e3c928e981e551ec090c1f95fe9838e58dca07", 0x7b}, {&(0x7f0000001fc0)="d5762a1916f1629d709a205186c40bf13c650ba174f11068e0bb6cef331350f4c5ba98f635bf9d833612b7cd47d90dc04a8bfaa0e9a0264e8dacf4469caa7f048e541caac3bb8318f30494fda66f95ada221a40d613f8b398d6b1fc34728b60bbe0413b741b8c0e88927b944c27953019728bd32d12d5ac10749fb3a1c4ee35da8c5f95ba2f4fc8b9454f636d64c090710423ac17e37a815dbe5c2", 0x9b}, {&(0x7f0000002080)="14b6f6", 0x3}, {&(0x7f00000020c0)="8db96c3112b7496d3f825541991555bbe19f7394b82d8c3b36b7254845f4b707f2ef49c150ba8bf81dee63f6476f6e82acd5ca", 0x33}, {&(0x7f0000002100)="6ad44ed168a0d66d72f80e6b05e8a3b997cb8b2bdcdca2be3a3c7cc6e476a7afc38aaf88dd89ce08df595f83035a069a6744c8a995794b9d05af913911", 0x3d}, {&(0x7f0000002140)="ba40ddedd0d4312c127b6e9840a85676a1d46c2ae2b3dd5d6711ca294d38d944a0d203c6a2303175c5f62a94d1398c22ca88178ad68a660de6fb1095e49c03882d9094df04f0aff8743ff228788f2fc31dcdadea68ae928b387d52313c13cb4c6b3b9c21071d52099443cc7cb0e25379c251eebeef8a37fcfd171ed81716de60a904eef5857377d2df1dc1b0baf73a99a16b5f3952dc7651a741b15b16a5694d240436f62cd16a8c20e13b8e00b1968e688b5b315e422d4c756a3858c9cfa5fcd1ff8b790107712f42322cc9c6cd3cc50bc034045431b3231c59fcc74fa8a4d3b70a1d6a153ef2eaa9c490c2d7ede50f6596ac2284ef12", 0xf7}, {&(0x7f0000002240)="75ef1c2193163719bd5f11442a87cc75aa1f5a230d8102a084626f76aa978fd9f0b8bbc0ec1c9dc9f2babf33838ee121797a0c520fbc0dc8bb85abf31c3a82e58b292d091f19ecd1e8ce25ab019026fcec7233553623dab454a0d14056bb9ae9300291594a9bb251e1fdde2d682a816e3defb472268219b2", 0x78}, 
{&(0x7f00000022c0)="51a84ca97ee3f2cb3e8e47c05efc6ec37ede54a7c0d3a2f10b1b725d507383a228951bdf405c4dea47625a5ce894164511e08908e8fa7244a0cd2291e67e89f8c4fa28bef10e954f07fa91c884da3e8e070826a7185e35e45e249c33a84c9480124aabf0c069d5dfa2b3ff99c8c448bb4ad88957aa1369f9625fed40f78c73abaf5448a58b6304568f1b714a9c4dd16acae4c9561c67beb16ddc1e0f5407205558b49909794a7432406077555072ec0345e41d8f9e44f5a05f7a84e729f5cf9661f1bdca5de75ef1b402d347bd51990f7afcfc94e2688e8433a3875eb47b7e477148", 0xe2}, {&(0x7f00000023c0)="36fee72f4cf163b7a12474036c6836098170aae8d0eb701f7bc3b1bc8ad03c4c978a278eb6c7541d78b7da14c76e43d412b52c586058d0d7e8d2f8298bc44fb756b73ef4934902f230777452210a5bd269d5dc190b802709d2fcc4d79c19065a1936860fec385e2e0885a64acd8e6b8a6ebf77a381bce3b742c9c8c604053ec6f1764cc2bfcea0219ed108f8e429c8b27124cd1a3b546af9021724319c7fca09062a0e62af83f177edb845163c7429e01964b5ec1e83fd", 0xb7}, {&(0x7f0000002480)="14971659348f75318e5669ad1e9acd0928c787f7cb364311426f056c388a55c61eb76900778c65949e4432318e05930b0e894c9b825a1fbe4cd7c36769f3dfd68821c9267409d185b0174adaa11b148cc372f8157be9d79121f5b04f92e819109b1f9bfb4127c23d3197056eb563d93bf6344a6cf1b5c7a4dc374bc49010a224aaf64e4a53cefe3fbe4c714d2ab8a50e7df9bab2c13c2a890d75186dfd569201b8ca0dbcf38bd5fb2f26b5be", 0xac}], 0xa, &(0x7f0000002600)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}], 0x18, 0x4000041}}, {{&(0x7f0000002640)=@abs={0x0, 0x0, 0x4e23}, 0x6e, &(0x7f0000003900)=[{0xffffffffffffffff}, {&(0x7f00000026c0)="1168e28167351c1e14a541fa9e14c46a25ff5b6b3074973dff0f482804bba468c47c4acadb85406a6f00e57c3f3efb663876696baf7ee607d7e35bb41dbebd7293c45a8fb3f39d566a2f4e1b737278b8350c23b3f66e9f35fcd21011f640c0ab8af65ef6fd3273291b783fba62d1f226e55e754e67490d03b7c549a06ceb2043dcc882f38c35592ea93c6ae4770b028ec65e0ad2826c069f04d41d567f3d2a99861d639cab1bec98aa32709ce8f114c9d913b858de0a9282e1aab19ea6f3e32cbc5c827261d4", 0xc6}, 
{&(0x7f00000027c0)="37ed7fc6fffa6af83a12b85a677c259f7b0704243244c574fc80e3001125dd4dccb86db4fd3f691ece725c5f75e723632ab2ff9ebdda4b1f04aa925d5fbb1803b21d30d36c6a631dc9a7d715b1370e4e8b19931ba86f96d3fbdb7576b09ba10ffbd420c41dc2ff81fb035ab613e36e73f5a72f4acea986d5f7ee0cec6c619d4aa1ac22b823fd84630a111c4e8193bff43c3847009c3b2a2959da3def4ad9983714f642ad760ac9b1b404c1a58123ea16d4be55f6d6fbd23f4a13626deb46b3d064dae1b7c61fb0b90d56d95d3954f4fcb265316c6fd639d2d8a079d8eea490f5cbf3d4bf80b4b7ff1abba8cfddaaf22bb540fae983736fe9985f8f1b46748661b733d268cd68ecfd420d99bc3c75ec1632fa5f78db7be4418a5c0769a9f9643faf16269f20746c87b22d17e86ec788b788c3ecb820eff530c3aaa9a72405f8c587d4a94ee910a94a5fc2f704aa8974be112f17ea852da68da6d71def83af8ea14f43e4a1d79bfb6cce2a012a3481a4a7c0250222e1a4054a50a3b0ecbdfe56465fccf6c7c1dbf9030ff4f4f72e8741c351e9533d63ea0fed458b79516b9fa7643536d65bc406602a85c40ab28688d8a87f3cfbfd52decf46fc7fe047e6ec93ebfb51fd94f13d3349a9b5c3e053f31e962c18f79f7438b375139d0accfa90be234bced8a4e4270b46f58c96e87723a16305dde914802b3fad5e10b9da4306c56b6b6fa2ccdba959ce5fe7c7d9294a2b4cab589fbd881145b5803599cf166b1f298116726b7a6a41424b5dde747049fd395c069663cab9fb89ea4ed833d50671bb2825abdecceac809e91c89b43aa6e7a277257b9c3115cd06ccd124cbd5ac2fabac90f616e5f026e1a05f31846ba84e09a8546816e69d10f7ecb22a134f12ce71602ddaff624bc3d4bcad0325d3e06180f95e6ab3a20dfbfdaea25153b2478af3e181bdbfa7c707d8941ca12a897aee337ec9527ecb5bda5c05fa3bba4d2897d47da00019bf5260ca495735f42a546088932b9bcc3e77122a37b5a22f42f13dbf434fd11b9cf99bbd8630ec8b5fb984fb5b92ce75df0f506420337be1c10f5bb659488a5f1f4addf4b21fb53afe2d1d9a29c06679c7aee9a3e1c254b21fdc1a90268c0f66dd39ca530c5992d3230a68d18eb282e141b43c19e2c535cd19013ec92c30c6171882b81545921eb6ddc83226fb220e2e523cd460687937d028c1089fd54a8d48051e130f13668e6141c1c361a60c08d191c931b13ea8c9db7dd0e7fd20cff4888b5225c224039b99dad8c957354ce5fbee577c877f6655282bcca6e855a2ad53c44b575ac0a5a7fcdc27059b23f86b42ed4e55aeb0b10c24d9171c3e76626a2a19c6c7a9860724703bf2d057f97fc2e2bec59cb06ef943f2f2819bbf5489f35ca8377b221e2badd0b683b48ea9968c3405abe6e149ff82582b071eba3131ee927c4039d5292ae502699ab7dbf25545dd3def90da7393669c27911918eb275558b6fbf79259a4a2a7a53e7347a81e2c849e90aedef016258d63e22db3a0675666084dfa32a679adacf1bb6444a616454642572f1726fe6d61bda3339d969defead8ab0a33483052611a530db3f7750716594ad39344ac36a4cd60ea44771877833bd30c7d0e1d1c39247b094304b733bf4286dc0f11fd3771a04c93f27ce02bb2e5b76065a3081a90ce126e1d0ed03fde699152232ce5717ca80cd2b22c190e51331430f50de23678cc2721c592de7951c729beb35cb11b74830a643085d47e252d8299595e721f17d35402951ebf3d9569a43429c5707e5f1840779610665534fc0c847afdea2a0bd3193a9c8384b0b9ee569c3ecf65e4332a006e347bf36f052f9ebad931a88791f0871aac4486f493d0c9252f48fcd4e599e3fdb696793ac97d394e350edda18546e8dd89466edc367659b21a65215a6fbc4c71be8c31756490204be236dc396b8fda3e5d5efd17b698c24fecf59ce3723d35e4344006dbe54f249de0ecd77f5aaf353279f7faf9b83778edef4343ea353b63443674cace4c42fb9c21a22fb41539d3728f148b0f7defba18c9dedbd94351d7b144679b1944fd7ca1658abb46b23d6c6d78be35eaa01a65c56e9ed7bdb348b4254294876705a3ecece2b7f74af64c4d62997d6bcf6d7ca90919c0b39a4ae3793d5737cb1302e1c0fef5cf6c6c5b871edc9a8e7ca9438251bb352c0a0f0423ef57b0ffeeaec980b5002246f4d62026fa5868b43ed52dcbcc4e28d2759b7a138c466f3537e39cca4bec445af5cc8a684e72a64c23ae3f74631a97349136cc8c65a20fbb34015877af2fdb454b24c30974276314d20eb35420fabe759d7860fca8f32984b355ceb29054a890984489b0b07c8c536d8b7976fd028ef61264e33cdce8d3e14e61020631aa9368f1f0dbf19fa7ea4cb91111a47112c2200f9171b96d70092d0375e829f9ee60f9303cf3975e8ba8ba6fb2738fd10fe512c05f141328b0e59135b8267426e2d
38bba11e12e52f82af9c847a98b80fcfde0ee69ea42e03fde7aa81eb524dd0fac21db43695c52974b2877aa7d9433e42bda40e8156759d009b39ccddc57b247c9176282dc2c45b4e98f783f76f33e685cd46268592b8a2ade3c326303af6525ad9bf28c6ecf1521642e73cd4d41fe75b1b5d955e5cbdfe4820da257ab4cf9de18815f9d09e6c60125936beeefaf13eefe41297e4ec477a9a35683d73e510efa2c67fc2f69e7175437b078472597d7bf9ca4657edd6cae6713068d3608b3c432116b9897abcb19662f42be84d75bb84c10b5c260ad27625e331bc11ea132ef54489e597b204cf7cacf0644ba0daa78e5e4fcdda37a287b93cbaf955501982cb1fcbd911a0e4946a2a8df8beef09e6115261775f04c4573886cf4c71489a427a29f2d9403a2bdb56f536a8a250d6f3ca6ed87fb1e95400d97ffe742bba7f935996ee621f32682d5d28e3e9e3de7411c21e5a0d416cb9051874871f77de363df2120541e58b5a68d24a74a0b9fe0adb97ee560353c70e80c6d59304275ae0e3ff4451e2234dc03a87a72feb6e6dafc072e0b229164c0f83d77011442356fa5af3634f3174aa4491ddf5de12cb1a30aa037cda1037742f52d868a6f94151170841c199271f0e62cdf55b437b5cdab6cce5b67a2a26523106d2922634317fb868b94915bd98669214fc572062a7572c22daec9e616b977c93f204e835b126c9dff21767c4166532ba508e6383b0fcbff9abaa80e217457eb321983f6418ef42249f47e170751e1ecec5dd42605d9b8fbb6eac2afbe06bc0f2ebe9c57df724e075d9ca01ec3eeb5f3ccb6516f99b64a78091bab2f4535074f33ba64c3f7c4b10e24f0ac9fb1b6441d887c8011b9a009635fdd020ebd099a95e56c9e5068c0c4bdbc65a0c1a49f99c3805796d0dabf09e2f26ace8507389c36d3b22d0fcb923d8c43d1456ea56d9005ac27077953b928ad0d0b0a965b70762f73fb1b90452f6008304dd2bf2e7146581792b5fe3ce15610e636d2f3e757bcc6106162568f601aa44be7ddc740ab4bea1293bc8d5133c60f7489d5bbd9274ae1cbf31cf5dd68c680bc5916c6167fa08f1d77d1c7ca81aa445c9bc6dcfa1fd20292b90e746e43c38f32ac2640a5cd407152615b417a18e9ad6e5aa94077b579434a20799644a70ba84fff881c2ade52b3dafec5e112ed1dc7ed41997dc23e1730879765e6c49a4e772a05d83d4a547f4df1db9aefb2e86313244a072290fdfbb7a5f570482542443106ff0a961260c6500539d62875752f0063aaed9bdf3ae0a65a803ed98c82b93f7b6150f0792dba710961f1cc4db3d459ad2871198470fde28df598fef7361c08cf2d5f28ef111703531b45263025dc1a8716ae08fc9f5a52c671c2f774ebef96042b54f427c35a64418360183c695d22d3560a6e2b01a3886113a7ffc2c0d87645a97ded067e7067c028d448a6817367b889fa193d5975ce15a8e665dddcf22e61762e38cce14bb15746f0c44219b092eb9a4fc8a6c9ddad9a74c49f58769e754100b583c6e1d6c909bc26a40cb76a385718e59ed5a2a791d4288221f37e8b1ce863bedbe31569bd396ed0a1f6909ae931d213fab2a140b16775d1db7da60aad07a2610f7472e9e7157a1b7621de3efa1bc8aec99dcf70f50fa291b69bc47d8c9d04c8b18a952eac5dabf0d78fd7fbc25a26919626807ded2c6d25e891fec2f84a5ac822d0c98d865e0d06519f019a7ce5d13c1d3289a7fc70f26baee7f4e3988027d753a79562dbaa5c419db77ecde101d3b15b2cc4f92d176e932cab7af5a0f318c5afdbbb0750af698dfbb8eb6ee6825040d95d7cade463dcb81a770789a4b48a49298b84b18d61caa7224634ecd8653369158f88a85c13b99d7f255cbefb79c9628f73f2b3ef40560216d21f8513ff17bc8faa11c2a1bc25b2040dd23826916ddefc1e7631c508a6f04da29ec3e8b8af40b9fab617fb7648e23fe4ba20cc965813b9c7f2129312c2b74cf367547907b1e3d9c4edb633088f11601e2d868dfaa4e4d8a7ef834af3d483a3a4b88c6acd37818befa2c6b544c392b472de4e25c73264dbeba5ce6fc9c65cd7d1f411e743517d5920cb81337bcf562c94e332a07bf0684e7df8d9ac36471ae12416f546436dc3b78d883cbd5be836ce1f70bd494352c32b89e736028f083ae0df7fd4a3c864324a20d99e944548678532d4f43e14e1792fea757f0000a903e840542110ba639245c8e5d778fe23c21b0d36caf004b92891d9e9ede9e073d2dae8827e915283378d90177a1503e856c4d7ab95cbbd7c05c20d1b7982ca916e5885803af0b08c861c559345466eef0ccd2994cd245ee6e70c9a45686f42670c2459954c0b67e521b982a49fac07d76110c292dc967abc34aaba9bb6bff02f331d188865dda6e043ba7b4f81577376f677222fd474ce8c1265a0585fff23146489450cc880f8db2d167d2c5635e49ba2da7edca21989a560f1cb09b397ae9745780ad2de4e6929b905
1deeeee94fc7fdb6a6a1a3934a0b1e4d4cbb3499684bc0c5a51a331770d4e8eaac07d56370b5f0ccdfa1781efe91870213ae5ffca24c9a0686cc51da1050ffabb99172f0f26c57eadd23eda13892b63ada2ac181d540c07999db18ccc4b5155556d2170458739f7e54de2272aa1833134998672a7e90e30eb04691171bfd55623ebaffdc5e83abe6600f24ee7bf9b511a10720a61115115201156d3258c8fb769543a27b28dfe731811791c5d4c97595ab3694e00309ee343048a6dbae364cdfbb7c3d73e5a23340f050aff74a85dc6130ac158afe63c39816e5188c240e450d1bb853d5470654494365d95ed80b94f4088fbf69f79ce79641d355aacb6d7e25de813dbb26bff864a59060d8f24e0261f5a038e2bdd36ba16e320b71cbcc960def1270785cfb0033a4f340fb4ad4b0d48f7a769e29cd8dc2c0f188bb707beaa7ce95e1dbcbf8782feab86dccca789f7da3933ea8370d4e7c01d269300ffb4944c217e71d6a0b0ee0ea56245b1e87e19f3c8cd5a70c93547ffe2982ebd6dc6253ece72cac73e61515e3a485a5f9942c3fd8bebb0086626cec3a8220df520d3f7df7787f1fc9d3301f89cc0671ec8059d460822d82effbb9d0b400a9e4480c5705ac264b97c959cec6e0c5136357b3f959fa34ff070453fbb9c12a415f83d1a131f575f384790a31ec2de30a76730b1ae0528427963d542badde90714143fac51698a73cc64159acaa281e69d067001739c095402eeb5adc10ec0a6e06b835087db176f8ac58b34a", 0x1000}, {&(0x7f00000037c0)="a7dafddd3137664e12948dc084463f6c7338d677ea2d00e6b05a39cca70c9e630b1260d06cb5b1812c907311718201c907d7f6847ebc6b927c8d7034c3f41da466b43c", 0x43}, {&(0x7f0000003840)="fff6b050682bd1b4fa1d6d943007718da612ef465dfd5f33217647817d5edf76014ce4cef90ded9be2347e95f6f93fe6d04aaeae6cde4c490f2824587109481aa8b9fa555f8a4dcafaa198ca2702d6b0a7788785f58f790eafd67253fc7c20a19d1451ca28ac687f0d71dd2e61ad163814fa115e8ab0395bc8e619bc5938e34339161b04998b5d32029a16633f6e82af23f5ff5075", 0x95}], 0x5, &(0x7f0000003a80)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xee01}}}, @cred={{0x1c}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x2c, 0x1, 0x1, [r0, 0xffffffffffffffff, 0xffffffffffffffff, r0, r0, 0xffffffffffffffff, 0xffffffffffffffff]}}], 0xd0}}, {{0x0, 0x0, &(0x7f0000003f40)=[{&(0x7f0000003b80)="70b377c70387f644a405f4f2e35e721b7ba54c302fa4cb17a4bc8766e7374451fccdc1a1c6078e2fc17047", 0x2b}, {&(0x7f0000003bc0)="e6125e16d7a8df93b40f4fe2a2f1680aa50a616a96a529701dd568d52b5977a804fd8c0fd5437bd7e61314cb97c9a47e06edc44d096126476b022ab6ebb41765134232aec9e0ceea89ffd2787bd9f8e3", 0x50}, {&(0x7f0000003c40)="d6447e435b1de4c192b66a27a92b8b7585e864a464", 0x15}, {&(0x7f0000003c80)="129d885b801261fc14813be0a34b56d6978c91ce3f", 0x15}, {&(0x7f0000003cc0)}, {&(0x7f0000003d00)="eb19896a32f2d21ea6de7080c4d23e451c283764f16ca7ca0da11ee4", 0x1c}, {&(0x7f0000003d40)="f0b456d22c3170eb7297c7b2f3d042c7b6b39636a8a80935cc2c3e0c455da9a259260c91be5e35b7d8aa172375304515e081cc995206c07b9c77188a9fa4c077547d58994864e8b45d1378274b33063c1409a5e9537742343a604b203d606ec24b9694bb4bfad975fbb2c37c70137ed0e4818ba986a293ecb1b946193da9bcbb2efc24009ff0ee098d6df07ff2eb7c39a0eee42faf94e96435176ad6200ed15203ac993743e46a80736f7b25c69162f3eab0b453d787adebb9210b22ba11e6040ba7ffe97d53d4addeba80fa29388171c595b0c5065273f3875a254c0d9dd579953fda006e7cfb768d5abb3b6531116b9f51726d10b840", 0xf7}, {&(0x7f0000003e40)="e779fbbd622064ca2c25a9abef22da29971cec5f862e42a2d40ae66af52a2458448f496655aafedee5b7664ae8c21119267849cc7788ae2b1332e890d866c36da6f78e6827c1e059c9ca969647b7b9e8fb343232cbea9e39243bbe3fa33ba32aad6947033b832de2a25b53abfb95231299b7e640f5aef240da130e53c6fe9e162d555a223bb24500319269212dbdaea89f718af02183beb5f268a5a5e4e0edebdf0cfa9737ad6844b9fbd46c1b4f4b6d5cbcd3eed073180a3ac9f8b42c4aa4c4568e84dd690bd27e65c3a2ba299f54c3", 0xd0}], 0x8, 
&(0x7f0000003fc0)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r11}}}], 0x38, 0x4000000}}], 0x7, 0x20048000) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r16 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r17 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r17, r16, 0x0, 0x100000002) openat$cgroup_ro(r16, &(0x7f0000000000)='cpuset.memory_pressure_enabled\x00', 0x0, 0x0) 01:55:26 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1257, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1976.805454][T28226] workqueue: Failed to create a rescuer kthread for wq "bond1023": -EINTR [ 1977.119175][T28245] bond360: entered promiscuous mode [ 1977.179888][T28245] 8021q: adding VLAN 0 to HW filter on device bond360 01:55:27 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, 0x0, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:27 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xa203, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1977.247128][T28243] workqueue: Failed to create a rescuer kthread for wq "bond971": -EINTR [ 
1977.468903][T28251] bond918: entered promiscuous mode [ 1977.511720][T28251] 8021q: adding VLAN 0 to HW filter on device bond918 [ 1977.579972][T28275] bond971: entered promiscuous mode [ 1977.596113][T28275] 8021q: adding VLAN 0 to HW filter on device bond971 01:55:27 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xba, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1977.654182][T28254] bond918: (slave bridge885): making interface the new active one [ 1977.666146][T28254] bridge885: entered promiscuous mode [ 1977.676403][T28254] bond918: (slave bridge885): Enslaving as an active interface with an up link [ 1977.740606][T28258] bond529: entered promiscuous mode [ 1977.756798][T28258] 8021q: adding VLAN 0 to HW filter on device bond529 01:55:27 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) accept4(r1, 0x0, 0x0, 0x0) (async, rerun: 64) r2 = socket$netlink(0x10, 0x3, 0x0) (rerun: 64) r3 = socket$netlink(0x10, 0x3, 0x0) (async) r4 = socket(0x10, 0x803, 0x0) bind$inet6(r0, &(0x7f0000000100)={0xa, 0x4e21, 0x315, @loopback, 0x1}, 0x1c) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000140)=ANY=[@ANYBLOB="4c0000023d1d04000000000100"/22, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r5, @ANYBLOB], 0x4c}}, 0x0) [ 1977.845155][T28264] bond1023: entered promiscuous mode [ 1977.862085][T28264] 8021q: adding VLAN 0 to HW filter on device bond1023 01:55:27 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 
0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) accept4(r1, 0x0, 0x0, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x803, 0x0) bind$inet6(r0, &(0x7f0000000100)={0xa, 0x4e21, 0x315, @loopback, 0x1}, 0x1c) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000140)=ANY=[@ANYBLOB="4c0000023d1d04000000000100"/22, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r5, @ANYBLOB], 0x4c}}, 0x0) 01:55:27 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) (async) pipe(&(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) splice(r1, 0x0, r4, 0x0, 0x4, 0x0) (async) sendto$inet6(r4, &(0x7f0000000080)="202179ecbced66d21004d86c4827983d6ec102ae843687184616ff1979", 0x1d, 0x4000, &(0x7f0000000100)={0xa, 0x4e21, 0x5b, @empty, 0xb1a0}, 0x1c) (async) close(r3) (async) socketpair$unix(0x1, 0x1, 0x0, &(0x7f00000000c0)={0xffffffffffffffff, 0xffffffffffffffff}) (async) r7 = socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl80211(&(0x7f0000001780), r7) r8 = syz_init_net_socket$nfc_llcp(0x27, 0x2, 0x1) r9 = gettid() (async) getsockopt$inet_IP_IPSEC_POLICY(0xffffffffffffffff, 0x0, 0x10, &(0x7f00000015c0)={{{@in6=@mcast2, @in=@dev, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@private}, 0x0, @in6=@private0}}, &(0x7f0000000c40)=0xe8) sendmsg$netlink(r7, &(0x7f0000000cc0)={&(0x7f0000000540)=@proc={0x10, 0x0, 0x25dfdbfc, 0x100}, 0xc, &(0x7f0000000b00)=[{&(0x7f00000017c0)={0x1450, 0x1f, 0x300, 0x70bd2c, 0x0, "", [@generic, @generic="bb10ac49b6e01b4b5887437b1b833c70a02dda1bc1a8181ff2a01e24cd43c856869b0928985342ba5044cf9293c9237d154e8ada0bdb3e2b50d32498d9e3c642c2be6c1117f98c5d3b02fe6d5ce17dcfee7d9f7d65cfb2143fd4b24763cb726988b32a83bd3b1c31e885e0c44d7a84cecfd946684fc74f869bbf9ce2a1b951cbf759f966353171859f5302ed3fc86f858a09d3", @nested={0x58, 0x22, 0x0, 0x1, [@typed={0xc, 0x23, 0x0, 0x0, @u64}, @generic="bb3e165b0ac7bdabef86367bffafd893067668110d1ae7a1bcbe3736938ca4e6eb37c458a159fea7bf2b54452163d92bbc3bc15147171c2bc1293dfa7a6229dd122c8840f74a8a68"]}, @nested={0x171, 0xa, 0x0, 0x1, [@typed={0xc, 0x74, 0x0, 0x0, @u64=0x200}, @generic="8f3a8995bafdc3086ec720af96005d9616771cd2a4e0eb286186722b8b544e288a78ff30a91c334cccbe376d7b25a1f750552333559e796847cba0976b59955f", @generic="a9cbac955ccc16ddd9ea0642da22044866e42c49cf282312d2ed48a15844da1cd8f077f014496e91d86fe88664b1066043945e50861fdf70f9d6923f0bebc68b8341202b1c0b69f821d63121c628bc2a0bdead1de8cf", @typed={0x8, 0x1c, 0x0, 0x0, @fd=r8}, 
@generic="9bd3fb1614f1cd6c93d93617f0910b6a8dcccff4ed22e398f368e9ec08379ea220f86ab1dacd98503dc3947ce6e3107d462a8f57f69e9a0ad820ed901cdd1e0e0cda5f3708e360e8caa9f17c25a515554f13a885f37a19c95e39d4d0e7858b828458910f31c4255c5cb08271f46592f15f39f432212ebbf7039fb1c2feb0ec270b51bac2ef152d4b45591b73707993fdd8ce2411b1f51bdf37f9e0df6c1ea0f92af5cadf5cf62f5297e947c55ecacb9055a6d317402a94999677aaae918a2b431296a6"]}, @nested={0x11e0, 0x75, 0x0, 0x1, [@generic="eb4d67d1a7d75bc02f3919c92843be72ade48518a7cdce94e712d7cdc1ad5c9eae69223883e0d3842f10014421828eb4bec7b1de5921ee1dbd8bd86fe7a10fb2883cf255a8eca470dd521b9ccf5b3d3263048500fef8d2420e9f803dd346504eaa0a605bf5b411aa55728ec0bb64ab81af383f5ad920332bb44ec7cca44587d90f109cead651dcb70c63c661", @generic="a7342ada65508db7da2dcf8e44b6d6c979f5e95c81611dadf89a8e1f27bc2e59b4565cff40e77f4496fc4d67ffd0f38849f2da21ffbcf169f77d0daa456a35bd94c40fb9ab3c94153f4745215e3034b48a85dd20fe9e19b898ddc5bc61e107727977f82b53f61f09a23602dfb2b5bc11c64f306636aae47d286f57cd68251a5055e189c6d018", @typed={0x4, 0x23}, @typed={0x8, 0x8, 0x0, 0x0, @fd}, @generic="5989fc9f66a535b8c627c8b3c78ca6113c21ee025347e78c7974885d9e477092278896e6c0738330a10b67f1f31940c147658f715c2d5728482b166c4b3379bdeab54eb22b5670e80f48985baefd447f8a6877409dee00de087daf87f3a79e6f31efa05c7b94570d7259b877a3e3646d3065af417b53c282acbe24b229de7d9e9c6de6bdb1abf5f60919086dd5b2d96b750baa52c052d53a0aca65e3e61c222300957dde43873e28820a7669e626cdd1a3e2", @generic="bc4a037c5a4e9b298c4358adc01afca49ba5b3235795d1cca1e6ee06be0676d4b6655eb17c0300f198e8ebca41f43e6106d7a5a6071153575e4929c8a3415479c43489cb79ce04700749b9e50895990c02ad6f4c89d852a036fca3f66c1fa2b3998e12dd0ce3bce4f2fe7bd769437aeda0aeb83451d0488dd7603aba6a460893a4f89cd76fdafcd806f10d83f4c12e1fc54f90ac28fe9b9c5c34bedf6c440370f3084e423037b6193ad6b4662c7729c9d9e05686f422206546c33b3e8c2cd2bf801a570b6b2bfb864363a24077b5feeb85b41e7761d0cf089b6d3e5a0118676deaea4fcf2a4c33212379a610bb938361d0eec0115ba61cef4ae383f88d1c25126e62a37910c15196b6b5128779ac02baad19cb649a074635b7ac6a15db1acf53019ae69ba39c2bcd1a08e6bef9475068aed7651aa3e6fa7dff432e5500ff880271bf82e13a1d519b9c80a85afc57ccda56e43227e53167e235c3aa9aec7b0f10052e01656b927b88a316af59b27ee6f2e4ffc5bdc2890b2b3bc067a1f1b9985c76677ed1a7c9c27416a6d9864404ae45ed1dda81941a827bc75ba4fc49e984b1fa7842f1541ba40d140e8e47db0baba8a6abce760ae3fe42bf4b12d6552ecb71b0665acb0e51c167d6d9eac3d572ec4543b0bc63735c1f14c704ab07d0e8112b43aefc498dce1b02825957f4a9b443beddd2fa986886fb3166a3040e87fb307162aff63dc1e29ac48eba147298ea97ba697bd5fd31886434fb23e57737ba27567c2fc58a884d3125b35648d8fc99cc7a48f7a4ffa31072dea7867ef2684b07a33ee9e12f8086cc179f2243c4d3de43104199428c57a30377686e238be3c11b88255a618d76815bf4268e706bb5e920aab5b85158fb3c5f1c6f921c6e6029529c5742b042c1c27ba414268ba895301b9f331b0d86e2fb2021be406e7dc7997e727976e95450988a89c58040ac92e2f6875e6ac4cbc9a6bcda0ab805fa667dbf4437b989da59c6c8f1413b237c19cf1c6c1f689aac9b57de98c81094550add06913edf872b3e6f352506d4879fc9fe08d67bd35a2548286a77364038c338ac2537c62c152acf1e9f35071b093186b1c50a52b835eb60e248e087c9f9a19e6baa17d4c18740292d0ea8989d645d2c35a6ad0b9aa6275a5755cc93f2b04af16407b66497c7ce4cd73295574e963985ecadd891fffb3dd69c7617fa221ad1e0f8f1b172e512534ea83ab92d7af69c37e67bc9a1f1befc95bdb613a091a44b85843c21459b53f4f4270a7924555116b5b0920174bd5d5428bb3f7ae8427c8ba81d338c5fb6fd48a33496bda1669a6bd59373d557ac9dce86b87bc04d701b2d27a1f2fcb55fd0305e452e5ff0b911b7784d70a343fd764b6465f203b1a8dc1435b22b93d39e3a42755eccf43fa196d859b823f3ae19611c45c0ebfc80aad1d1894eaf024fbdc5ae41ec367f86c48714cfe355cbe294a21f0ece615e811af25f5f522
27571ad84443c21491eba718dce148bb1edc89025fa9d4942d5ac43a534453e60e95b07e434f67659d725eae65aff3422e5895e2f2f54470998395c5c1d35b65e0f87883cbd1f706005d10f95b3bb78b35b2d47e244dbb3dd35dd12f22d575f3cf8f8677b4649c6e26a6ae969a0e452ff8418e99bf7056605b100a347ba62bc53d1ea56ecdddaf5d2de9c2b074f6aeb2e7426a14355eca7acff2bc4f27d2cf11c992fb93a134fe1aca14fbddbd116474ca8b407b8cb6cbd619ef20d2772e1caeac7e6f43f370ddf09072a3c51724551515fafe1241c3d96808a34ae1f5e72d82b9d69986eb1b4d04ab07dc15664b380d50e6c14888f55fdc640378bbeee928e278d846b6efbe6d6bfa299a4960c2e7cbe42de0c59209c1bba122e4ec698ab1a1df0cd50bc636ad36e48aa34e26e324e15dcbdb62d2c5c60ee8f175eae1f07246ce8119e89ff4ef47d11b4428de3cffac1b1c8bd739d9f6851e7cecab0341f6590871db4880c72ea5b75d3416f1e62fe0d47939f8fd69e9e93755c1a345a04ec08631cdfb8e8ce37fc5dff96dccca60f95e5d0e593c379510ccd01b7fce8485b9da05938f591fe2661d97a22612963a8babd8aedb5cfd006f54d32f30961555269914407fcadf2763786f34324dd2273cb5e85434868f34d59f249a6c8f99079ddb015ac2ac851d45421f1cf19f4f2e5ad83490766f4693090972098ee7e06a57c8b6e3d44e7d4845db7fa4943363c02316b8afbb6d261d9da714a76b9ccbe5a0f92e0184ecf7e6810f9b4c5272050ce0ecc0a2e6c022455cfeff27c056cbce250649982656e1992e075549f737e025cf04e7db3bd97ceaf52fe2ba6a2ed35f170325e4c02b0bee9860b595c98c4bfd152581842af2655b4c008b87875b55dc3e7694f78d3854cf876b8604375338f27d191c24c0f0d30d7d035ba5cb41183051f1c6e0da3b333da56dedc966bb860fbb6cae83c7dd044db5c3eeb6c4fb989a0a1b3250634ff95b3715f369c94eeb354d6817bdffe0114f82838293bfd86997644b6b47220235ec62bf61d51f632e5beed03525802176ffdd380e6e34d46e50e7fdd3d841ab6d516d1df55fc6904c6534e30922708b0a536224fc521bc240126ca1760ca32c4b26be004ae9ce9fe1d0fb9be7405358c65fef345a8d32499f87c6b776478e021d26e064cb049e1b9ed91f1bd782211ba6ce0d31800128e050c2ae974b9ebafa46e8d51eca2c2b2d069f3334e3eb76c9ecb78fbb5591ff444dfdcc120a9d4a619f9c3fc6b3d9be6a803729fa51f7da530e63b888d170f627c58fb10e2304b4b5d01ade88478473f830c44b6e7dae1aa421b84c225a98cca8a1df182d37e7148efba9f502f7f5b8bb2b796e83552b4dcd39d2829ce7a6bf699dabcb8f36134c8cd38115627ce0608a4962f5c149c48359a756b932dae1a393e8d825f473c47ea003ed0d64da2480db0eea0cc09caf353ebbcca6304d25e5ebf562fe492388fd4fb27dcbc1593e9ffdf97cd844f5ba93326110c5d2aab77957977fbbfcf6a9fce8660b920e514c1c7167fa600b57eca6c4a51620627e49ec7e75e691d60fabd09e53ec997cd624449ca073e96747070be107d48ff9c635b8e702a788614668d86eb07dc72af7b81404512eda8cb4e15d6ca6ec8b3efe74207a38b54d6392784ebbec3c6b0dc2d8a8d65d848498d96ede31a0c54ab142b1a3575747c761533dc92682ba2901b8242518a5b704a9b162e979da47ba808a7f511e8759fff37ad263929cb14e18a11ef9f86c0ab4d7b3f7141f8a50947108482188ba9b54cca223ffb6874cc496fc319d29a39444c4783b9172edb9b0fc2baf10196519053f10ffff2e99e2d2a9aa0664c11142db160723790e9f34f15e65190709856dc8339c17bcc91ec3d037b7ba1a97a3865fe11286cb63d83333a07c86107ad25fe833d399980426022415b234ea28d3aad9aaf87036eae1b60195def1e684af0d256f63fa334707b5d4dae394c5aab80e436fa1a9b19ece9fb211a829f1a39c27e640d442f36a93975156338c8fcf4357d124f5b393e589e96deb0efbb909a90c2bd0378459252a85e414ed6deeb9e69a8cbb5cda07b5ee12658e93b768630671d90f370bc479161a0e6e5a48e9e155a15441fb552561f6453a2f999736bf781d08d7ec78bce43a58e8dc058c40624cf609c26accc8117ed37f1f2fc8ea4f08bbc17cbb9cba9d2ce2cca8f1e45a3b92d39c5a6a5ecd28f12c8fba40596ff26cecc2013943a04c9fa4977e3ec593406c8b2f12081f61542fc806ab195b5df37fd97ac3b58126df40e5a81dc50d612f64df1084e308d853d0dc42614b9810df9bd1f3fd33f5cfe958bd876ac46eefd00187422b19f72c9457cd887f825cec6d2a740b4a199e82279a60ae036dc4ab69adfc334e3d7b079870b3f9cfa02ffb05b5e52dd0c0c641e7d21356341f5388598d826372d7337560f9aa7fba5a9654f1617efbd3142548b833a54d6d36a7cbc991d10e9df
084687b65a148fcd28ac322969314e6ba4a11019e6142fcf8327d9433fcb5d236baaa5db9750a2b9db8d397aa14676d0033f35adb97d5373354d18ff60d6572df950694d4d773572842f2e19582c7173ccdf159da3f4520d93a51dc0d11c476084d632a9d2415afb6605e5debb147d135f7d5f8a0e46ef2ecb684f197c4adf30f5704149901d1c3cce526c6486a0e3184d2376246b375ec67b069321276e7fb10acf46f2e5a0bfc73cb68753dd9baee32d6c03b5828054039acf49fe37a551dc0e5026a5ade012a998e4f7db9863016e7d58f2d9a94d9bc048a58dea08f90265107a133add3b92525f5d83766a2f1d76d0a8a7d26de85534f10410cffb4eead8cd5b40757e5c6f587285b57e18821d2d0e3ef0c02b6f77c89624a067a77b81aaa4e3f9aeb0c5c51e8b13c16906fdcfd669fefc4d3ad99f7997c62ca2236cfb43707e9d17d411678eeabb7e801df1c9c7ea41e2caee335cd9012244892f9c4c26c523ff7af145edeb81fe9c9f905f9a0eeb5319d7376002b2bf6c2ab43f583a2ae0e1954cc1ff39ecbc6f59dbd5931598fb9e4310755aad3a82710b9a9e4c47890420b288ec67db645d7a2c61ee488114eb0e99403c5d84ac2171bd4f06cebc30da42ae0036ed31e9b87a074695e4e1aaef7c4535ff2fbc627fabdb44cda1b231fb0396ff9dd0615541559e4b0947cc41c20f109f1353e7d9415d5e9beb8ce0bde999ee24abf4e17b03b8d6b80faff084c0efd5e7805e934ca3291f95414c66f5dd87c6fb00c76979c8467fffba84888e0fc3601be41306ba5711ab9a903d2a430dc00d97f5367e48dee6dd4392e19e3e97fc01d86baae5d10aa6549b9006d0d20e71f73cde7e018485334cadbb7e08f0c7d63de58f211571135a42dc6bee8305b9902eb128da19dd3ff9e7cbfbb5293e6a158f4ec914736e3ad56fb9b12dc10fe965eb9087abcd316ab08b63c60fbf14681abd567d3e746e097a3bfe7d36a806ea1eb86b829659ebb10650796384e21bad3acdbc46251bfc52079383907414d61b31c5d5521c6b8ab5ec0f62e5b0b9005571270137fc97dc5bd577c6b989f8d6110801f63fd21b2490c8b03b3799700e29ed4f70a4fff87b4a4db60e5d618cfed4e79583c792072b309b04b8f942bae34ed06170489c87b21e563187f8bcea980922fe3d5677666518da8716caadb4faebb5927a7f4a6b704defb5d8927cb96c99d8728035da035857c0246c1ead33df11fe3222ab564698a9269bf3293c28add3c085edaa543fa085d4c448d93ac5ed04b6de698ac754b0a36c9b0b38f6f899bd072c636030a35e29b8558cab7bf4ab50cb2fd9c4e384cf54ae3abb00133685376894af9c6a2af56da61acb5e1744935903f5f0cc84491857dbbb90d19eafc2fd42868ebc7f12d86bace36917a60c10745e413b30ff56eefcc086adae2c3abe2007fa13691261f4578837650e3b9ee4e5aa493e53c47e3d964d18b709f3c94b50d8ed855ed6e9b0f552934bf23f0c7b7b1a1c95b284f586d60ce0d159ab74b5e6e4d0ff40aaf1c908bda13bcec4fdada719c112913d51a7c2767672a3ab6a9e94d9da3e6f61c66e2823a7c5878201795c6fbff2dec827a781765566b984a31166d7fa33ea15d00edf28265578b1ca377033dc6a00b10f97b1316f5486037ed3ad114dd39f0810329780b02eecd7349096bee0970e4602ae047e53e6927b4d031d48070e58c6680a03a05cf5390803fd5b754d23d0b2dc3d50af8285bae21dfe2487b41ecbe56289113fa7b645fadd19", @typed={0xc, 0x75, 0x0, 0x0, @u64=0x2}]}]}, 0x1450}, {0x0}], 0x2, &(0x7f0000000d00)=ANY=[@ANYBLOB=' \x00\x00\x00\x00\x00\x00', @ANYRES32, @ANYRES32, @ANYBLOB="1c000000000000000100000002000000", @ANYRES32=r9, @ANYRES32=r10, @ANYRES32=0xee01, @ANYBLOB], 0x40}, 0x4000) (async) ioctl$SIOCAX25DELUID(0xffffffffffffffff, 0x89e2, &(0x7f00000000c0)={0x3, @netrom={0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0x0, 0x0}, r10}) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000540)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000480)={&(0x7f0000000240)=ANY=[@ANYBLOB="800000001a0000012bbd7000fcdbdf250a800001fc04fd000001000022", @ANYRES32=r10, @ANYBLOB="14000500fe8000000000000000000000000000bb730000000000000007000600836b00000c00090080003040", @ANYRES32=0x0, @ANYBLOB="0800100002000000"], 0x80}, 0x1, 0x0, 0x0, 0x20040800}, 0x4000010) (async) r11 = getgid() sendmsg$unix(0xffffffffffffffff, &(0x7f0000000580)={&(0x7f0000000100)=@abs={0x1, 0x0, 0x4e24}, 0x6e, 
&(0x7f0000000400)=[{&(0x7f0000000180)="94942c3d1e007dfb8404de29a8697799b1f5d6823a70813d4cc3415c6f862e8ceaac7242aef16f9f7c571f15aaacea204d20b49c43182fe1dd3de88c4a06101fc1f8d6139579492cca024fe7db0bd605ad17f17bfaab7d62fb0b847e05f9c41fbfaf79a513efae1ba322990f1327d42eabce0b83ee4fb2b875a3c4f9a1b2", 0x7e}, {&(0x7f0000000200)="993ccb04b5af9377cad757d9dbbe8345526644635ab0ecc50c5c9b41303e1e1f5b1f6161ff3f0a61f3f51dcf5eab537b55b5db80ddea43032815b7908ef405941077ae8e58627fe7265438edb56ef1b6918735c74b3b8fb318d24c30d06cd07d15f385dfd52cd11a49d23837a38ef8284140bcc827accc91e3fb964378ab5da48352949a0f4b27797b96b083028f2f6bdb579e6c1ea1809c644b8e841bb7bc0eb312d29e9fea73a71744649b830f244576a3b1b8f50150c6379a7ada43987439be4e1258efbf5d325ee5f0ad6c9d909bd73a187d299cd9d782beb7a8b2524cf2b61d2dba7e4acf6764b73c9a034907cdd5b7f547", 0xf4}, {&(0x7f0000000300)="130127c0749a951379b88d7ac86bd00a069d3e5793db16848cac09380ca3c6045e088493f74bbdd96015c04cc03eae1802359cf0a739df19bbbc910c3256b1724713e6e5c4be6c2fd26afc35a60e33dc091785fd017c569eea7264d1416c4ee26bc35c2a3ee4c8f285c9da4f7d78ed6613140dfff54f048b51827b8380edffcbbc154571185532f83a58dcf55a3657ebb73d8a261228568bf32c1e5ed7a414a2f8b30a24d5b952ae26d33311c4d23fa6db921a7464444692273f8476e09803bb860b51baeabc34a7828152bba3533e16df0300294a425b07d1a38b122b7ca71b5dfb620963", 0xe5}, {&(0x7f0000000080)="24c7145919fb2421ddb6ab6620204c0441c838579e3bbb9693a8c127c2c88f33b33f1766e6b65d233d3216d16c5ebbe3342d", 0x32}], 0x4, &(0x7f00000004c0)=[@rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x30, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, r10, r11}}}], 0xa8, 0x40}, 0x20008840) r12 = bpf$PROG_LOAD(0x5, &(0x7f0000000280)={0x6, 0x4, 
&(0x7f0000004180)=ANY=[@ANYBLOB="18020000e2ffffff00000000000000c685000000360000009500001800000000922ae83713ab9600010000801b10fb54a8cb72d232ad558c46fff4208d4990ec11ce9413ac30e00bd0081f8504e19a5183d769676520e98a263345e44d5ad12bca35510100c4d86abeb12303ff1c9fe0d0020000d60400000007d3670000008aff66d6b3181ffc1d62a3954c1198bbc4fa13aee48ca9e8969faebf3183fe803ab3f5024b52dc265b36fc9dae00a09404f01f9504d0976d252bd8d24538556e5e57bee3b8cf464ef3c6a7def8bad3ca6e3abdb21696e340bb8e2a093add57196b40def3858ef569147fa4108328392d322ab5df10a2f69a6bdf72ee7944e810d0223917c3d042410f57466f59544047d6d8ac44060000000000ee16c729300d2301800000000000002b5a8b05fcc154ad5290a8cdb97c343f454ff69dd6cbde49b28a6cb5f4fc0001745cff6e00e7ffffff0000acf3209a08439f1ff01779b6f6df7e02aa6d7760525b595fe1f697bc114ed1778e97a3f0395f946974cfb458be2a34cf924dc37b5592bf17956f3547497aba814382ff67b345b677a9d6523d87008000000400000000003fe8613ca29ff92be0d8deffff7b68136b0046d535dd39c0f35408869e9b342b953f91447e6b9eab304f134306320600a44095254b45a6c1312a13696c7202df5f764713504facc532c5a6d44d99ec7530ed7b0311000000000000e54e9072a22d911f4a2c2e2fa806e63c5cd98a8569a6d6bcfb000064885117e2ad910eae67e0ebe380d0f648713e68153579e02d71c58d147b00821ab9a6475b31e1ebf1369a04000000fbf3983f283f2f00000000992774814d63c933912d000006000000a66acb0a38856929e7d8b1b06c9bd5d7e5490f3b8596b694ea9483bd4bd287c83dd998a74694d18bdd8ad0983bc90770bbd26a82b9d99d5fc04563b523c47ef8c33400e90d02000000000000000edf1147a7afe772cd45af8aeffe2753088e02ca6bb2feec446ce7dbce66f0a93a03371320980865c7c62ea4d8f8a864dce9fa85aeb0454349100296ee2dba39c3f6fd6cf96714e11fe03b5062809a7418b165dd0336d226bac1e1223be1c97b15175d0e664beb126000e96549e1a1228c686edb475b705eaa9515c96f4fc6b3c925ea404e0f1de61026dc6c6618580fd6ce9eac602c1756f6d1056712412131ed9925989e01eae489ec7052e0ed72c326c7a8aa63999e2297c54ce1822d14b7c7699a9d0600f11f2e7f474cffbc35bc8623cd5eb68af82275a940be0400000000000000bcc3fbe7d90de96d6a8e9f32f18d1f606b381e4903b500000000000000000000004a2357ba5f6000de1cfa88b7165dcf4f2aaee86d4802000000000000008fdb686d5da2a42e4b5024b6535811f362201d4f82012e6af704973d04ea923c19e6cb723c1923b3eea2d73e176dff383c9fbbac53dfdcb1a68c98e96fe39eec23963faf3ebed3409144c7c53d6318ced678a621450a9b01e9f2772e5f2999d3435da02556e36c3215d2bd4e96c93bff3ad04a82ff3cfadcf65eb92adc6c68d66b11cb2d7556414a86dfa94bb7aa52c7febb1e9b2efcbbc5bccf9d39bed802f4f056976a9a362ee9cc624ec454b90200fd9603f96908bddc14500000000000000000000000000044d917c62b27679913075731e8fddb07c10c82002d60181588ae63a440454287de9e340f611267f37bdd0f2d21cb06fcaf45a0a297e396f428d43371424b307eef82c5d6d19f3ef0d3b8f7fa51957e3099caab31133b34a1d3eebc0f0c9056df2e9667ba0b55695c7894010079b07e7aef7785e2486472b5cba1f3346c1e8e23deb8c82bb6eb2c72c484241dc3b66da78260f800fffd39368b952f6f4a10295c50c887a31d8b543c5d10f2dbd4d0b84eaad43feb6e169a9f2fcff7000000000000000000e011bc6366f56fa787f212c1f8c0f47f50b1e9b5d841ea55fe569bb7bf1e78191c8a02ad436725771738a2a98891971e3b932352896e1ea10f62e8ef7a87e16151b39d6c27575714540d8c293a3fa4b5a825360423c1cbc8b5d19167152823ed853140edda002c16c842b168bb55f6bb713deb57d0aa78d6d4e5fc5be2c402bd246128f41bcb02000000892b135a92e8c844938aa98ba4839a1408a696454d40e5eed4d4dce481ca86bfac54c330331b7f2cde17cbaeb0377696faf546ecbe742d73d47d726a50f6e752f3325255bd7e8b5923aa3cfb6f7e06494f21ca450139c558000000000000000000000800000000000000000075aa0000000000000000000000005560bd9eb81e839e4992e64b074a66cccccf00334fa94da8477be7d99b558ec6a5b1596ac1e7617c6b32eed0cc70286caf2c5189a103f4b0b04aff171c4d388ccf67fea37e782f025c94c853cde330a193a967d907a8c88fcb033e680f559a72150cb900bafcd536f48797915a2fe9922ce27300009e1b36aa
4730117d9b00000000003c630000000000008fbbd11b015c415ca04192fbfb1a8b0e3460af35771dbac10062835c9bab3ad09f7a022c52d8000000000000000000004000000000000000000000000000000000000000000400000000000000000000000000006ec473c54399b7b8aa1ee46132fc45da8292631178cecf19550108b8b8423de42957ffe9bb6d752e68d2bc2ce777a17bf4dfdfee5de0f3e4dadf51ab9562827b762fa611ba5f32861c19dffe1dc9fd5c41cd46cf131fd6b0c2ddad90ac33f768f9ecc70327c59918fa5a249befe98262f53c8182d95f6da3698a6a88c2c31d801a8f1f5e0ce05138d5422da0a6a62b9dfe1f39775d1d0c9186096415f544aaf76b0a1c877a6c826a5adcfb22c4a0e5a46271caa3eaf4f389dd5f3c20dbddc0377a4266d7b9fd61b9287e9b4be0a413ee31be0ddecab0ef7b25cba1fb3654ddf291ecb7768ac1e177042cb4c452fa6b3966950000000000000000c187da23d6855500fe8510b51e13a890e394b84a6ea2cc8d42b97c697c29122298d55e2e1cca8e07abda2606a3f381c64b9fec0000000a7965e4854e8e3572ad5149b3872342dea9252132860c9af1bd5fe263c0313dea5d6e0c11a466d6892ed65f34667dd79b07b5cbdd8aa7dd561a26b5562d4861a7e1b0f48930e0b696ea3bee7eb72794e163d7aeac9a0fa5403ac9cb421eae283b0550f1d0d339cd7b96e71d3ab48ad9d7975e0c9b117f71d3ab80a0c9b0284ecc469fa6181c9c71fce07a6ffb23296a107763138e8d9876291af2076890c47925ac773d95d2ca42acb3e5f3a1550665b898462c139ffd0106bc8a61b6117d252efcab7106b4c3a3c13a70ff452e9d2096142c517b0e91b5cf88332faca5b3ee96363065c3ce32d3d39ec36e20d597e05664f2526bd918090649da11f7299789d00f5024df1e99d3efecb9b457642fe810370ba4fbe00fa60a28af966a27a1659e448bbe43a1dcd2ea760018b57a36ac41ef2051a7b703d55c0602540663016e20d50385766df4dac47802a55bd38dd767ee9960c6daa704fc5d01a1459134d1b9edfde3be9e25a110228c64253588ff420644dbc0854e69a7bdda72f93ceaccf92cfe7dd6296c950db10f6dd8a5ef9b73cf6a12a1ba16fdc7e35b805f4fd2fcff0a623722149c1465e4de2d53f0f10b14c21865027abc71a12cb1e9f8029c7a20000000eeb0d53a83e518c8d2052c08b515d9d0bde24ac4e798040c7db0bb03c019507d6377f3d5dd94a27abc6d6b120d61f772407e0d2cb50d29168b68aef9f176b4c3aa8b21279d4ea9c1f669aa8c2c17d5b3a8d1dda58d26f1019af04b7774c85d5bce8be010f27c5211938031c3404680b01279c778bd1fe1b48c4b5b8e0fe756e54a8d76b7cec5e3407d93b4eadc446440607de844acf5524a4657e33af2115547b735b57b5092d0bc8fa6acb832509abe0882d570ce400aaebd7baff88526608d6991aac95751671174129457e4a03aca69d82b64b89e6ad6ed1e275ec5002e48170e4c7b4f3971481098dedb88fba90770e44bf404d5a97fefe2fe8e459fe45933b78c7ab5fe985a480193a20fb07da1455fb283df68af569ac82aa6dc703e29bf158931fb79f2abfa6ff7eb8c4f381c9da58bea460e2ead969933e5391970ca4fddd64da2e5df9c4d82044068caaaab771b37bb06bbe673056d849825525f1120b2250f6b8520381f7a74b1c687781cb6b23e67b918844b83dbaeeb559ec8520d710dd6d6b4e64838bd434a36ed03fc0c488b24571032ffbc9f8ce97041e1bc4729d539358dc9599c1266b9ce2cb6dd0ad57a6e9d3d4a11a27f70b2934c96237e2ba09c58eeda678d4d08b6da99b7a86e946215afb1b48792fde54492e306cb5342e2589874b603a1de972b1f09cc350096f5c3e814118af9ba0793cfdf20c77b34eacfdf63ce59ec4d2f867bf884e941559b068d908325667672b5e1cf71f4829c0493e8b141489ed926b822becead7a0a2b4a4c008ab16b616d60f347e4da54f06443507efe57ea62399ef4eb11b2f559e1b056456a53998bf1c6d13c92e75136147f91ae3a75ca15eb1b51bf700b3c0bf54bc3745ff313c5e75dc66386897f6ee45429371b8d0878c442ad2fe9baf85c1390da13efc353ccbef950c29f39ddf436f0d9bf1be1515ed251d8b6f11ecb16b1e8d1ed04196e9b6c2f9e068b7749bb6c1f533e493f22c901662c65cb761dc2eeff2f698bd4dbae83e2dfdc4f1c7f918a00515c1bc189d10ec22b35c92725cbf0ba244fd029c4f026f68e000000060000ab0476c3fd7f7c1e5c000000000000000000000011e43e39d3f4394fbfa13c416b1c443c5e52eea726491ad75100ebad7c6d5a665c59a3fb158e43da904f19e7e8daa4e90390b8da945f6cd78536c0d2be07221f85ad46b180f256d4d84592691d15d65896b66b63a46705338b67b72dc1c3075fcdc5cbffb0366151632ba5be8ae815dfea9fadfd31c473a24a73d3e5116c3023b
3563c72d26fbd59877132bde5ca4ef8d92fd3613c768b35223f6fd0b5e9a8b98cccf1e2b4612e620e3a159d6365c9045aaa826aa0ee6d26cf0397ce674c20824584b464ebdc2f3ea26a7aec4570b242a6677a4e9187f8591c3a9bdc0000000000"], &(0x7f0000000040)='GPL\x00', 0x4, 0x1076, &(0x7f0000000300)=""/4096, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x70) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000200)={r12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, 0x48) (async) r13 = bpf$PROG_LOAD(0x5, &(0x7f0000000280)={0x6, 0x4, &(0x7f0000004180)=ANY=[@ANYBLOB="18020000e2ffffff00000000000000c685000000360000009500001800000000922ae83713ab9600010000801b10fb54a8cb72d232ad558c46fff4208d4990ec11ce9413ac30e00bd0081f8504e19a5183d769676520e98a263345e44d5ad12bca35510100c4d86abeb12303ff1c9fe0d0020000d60400000007d3670000008aff66d6b3181ffc1d62a3954c1198bbc4fa13aee48ca9e8969faebf3183fe803ab3f5024b52dc265b36fc9dae00a09404f01f9504d0976d252bd8d24538556e5e57bee3b8cf464ef3c6a7def8bad3ca6e3abdb21696e340bb8e2a093add57196b40def3858ef569147fa4108328392d322ab5df10a2f69a6bdf72ee7944e810d0223917c3d042410f57466f59544047d6d8ac44060000000000ee16c729300d2301800000000000002b5a8b05fcc154ad5290a8cdb97c343f454ff69dd6cbde49b28a6cb5f4fc0001745cff6e00e7ffffff0000acf3209a08439f1ff01779b6f6df7e02aa6d7760525b595fe1f697bc114ed1778e97a3f0395f946974cfb458be2a34cf924dc37b5592bf17956f3547497aba814382ff67b345b677a9d6523d87008000000400000000003fe8613ca29ff92be0d8deffff7b68136b0046d535dd39c0f35408869e9b342b953f91447e6b9eab304f134306320600a44095254b45a6c1312a13696c7202df5f764713504facc532c5a6d44d99ec7530ed7b0311000000000000e54e9072a22d911f4a2c2e2fa806e63c5cd98a8569a6d6bcfb000064885117e2ad910eae67e0ebe380d0f648713e68153579e02d71c58d147b00821ab9a6475b31e1ebf1369a04000000fbf3983f283f2f00000000992774814d63c933912d000006000000a66acb0a38856929e7d8b1b06c9bd5d7e5490f3b8596b694ea9483bd4bd287c83dd998a74694d18bdd8ad0983bc90770bbd26a82b9d99d5fc04563b523c47ef8c33400e90d02000000000000000edf1147a7afe772cd45af8aeffe2753088e02ca6bb2feec446ce7dbce66f0a93a03371320980865c7c62ea4d8f8a864dce9fa85aeb0454349100296ee2dba39c3f6fd6cf96714e11fe03b5062809a7418b165dd0336d226bac1e1223be1c97b15175d0e664beb126000e96549e1a1228c686edb475b705eaa9515c96f4fc6b3c925ea404e0f1de61026dc6c6618580fd6ce9eac602c1756f6d1056712412131ed9925989e01eae489ec7052e0ed72c326c7a8aa63999e2297c54ce1822d14b7c7699a9d0600f11f2e7f474cffbc35bc8623cd5eb68af82275a940be0400000000000000bcc3fbe7d90de96d6a8e9f32f18d1f606b381e4903b500000000000000000000004a2357ba5f6000de1cfa88b7165dcf4f2aaee86d4802000000000000008fdb686d5da2a42e4b5024b6535811f362201d4f82012e6af704973d04ea923c19e6cb723c1923b3eea2d73e176dff383c9fbbac53dfdcb1a68c98e96fe39eec23963faf3ebed3409144c7c53d6318ced678a621450a9b01e9f2772e5f2999d3435da02556e36c3215d2bd4e96c93bff3ad04a82ff3cfadcf65eb92adc6c68d66b11cb2d7556414a86dfa94bb7aa52c7febb1e9b2efcbbc5bccf9d39bed802f4f056976a9a362ee9cc624ec454b90200fd9603f96908bddc14500000000000000000000000000044d917c62b27679913075731e8fddb07c10c82002d60181588ae63a440454287de9e340f611267f37bdd0f2d21cb06fcaf45a0a297e396f428d43371424b307eef82c5d6d19f3ef0d3b8f7fa51957e3099caab31133b34a1d3eebc0f0c9056df2e9667ba0b55695c7894010079b07e7aef7785e2486472b5cba1f3346c1e8e23deb8c82bb6eb2c72c484241dc3b66da78260f800fffd39368b952f6f4a10295c50c887a31d8b543c5d10f2dbd4d0b84eaad43feb6e169a9f2fcff7000000000000000000e011bc6366f56fa787f212c1f8c0f47f50b1e9b5d841ea55fe569bb7bf1e78191c8a02ad436725771738a2a98891971e3b932352896e1ea10f62e8ef7a87e16151b39d6c27575714540d8c293a3fa4b5a825360423c1cbc8b5d19167152823ed853140edda002c16c842b168bb55f6bb713deb57d0a
a78d6d4e5fc5be2c402bd246128f41bcb02000000892b135a92e8c844938aa98ba4839a1408a696454d40e5eed4d4dce481ca86bfac54c330331b7f2cde17cbaeb0377696faf546ecbe742d73d47d726a50f6e752f3325255bd7e8b5923aa3cfb6f7e06494f21ca450139c558000000000000000000000800000000000000000075aa0000000000000000000000005560bd9eb81e839e4992e64b074a66cccccf00334fa94da8477be7d99b558ec6a5b1596ac1e7617c6b32eed0cc70286caf2c5189a103f4b0b04aff171c4d388ccf67fea37e782f025c94c853cde330a193a967d907a8c88fcb033e680f559a72150cb900bafcd536f48797915a2fe9922ce27300009e1b36aa4730117d9b00000000003c630000000000008fbbd11b015c415ca04192fbfb1a8b0e3460af35771dbac10062835c9bab3ad09f7a022c52d8000000000000000000004000000000000000000000000000000000000000000400000000000000000000000000006ec473c54399b7b8aa1ee46132fc45da8292631178cecf19550108b8b8423de42957ffe9bb6d752e68d2bc2ce777a17bf4dfdfee5de0f3e4dadf51ab9562827b762fa611ba5f32861c19dffe1dc9fd5c41cd46cf131fd6b0c2ddad90ac33f768f9ecc70327c59918fa5a249befe98262f53c8182d95f6da3698a6a88c2c31d801a8f1f5e0ce05138d5422da0a6a62b9dfe1f39775d1d0c9186096415f544aaf76b0a1c877a6c826a5adcfb22c4a0e5a46271caa3eaf4f389dd5f3c20dbddc0377a4266d7b9fd61b9287e9b4be0a413ee31be0ddecab0ef7b25cba1fb3654ddf291ecb7768ac1e177042cb4c452fa6b3966950000000000000000c187da23d6855500fe8510b51e13a890e394b84a6ea2cc8d42b97c697c29122298d55e2e1cca8e07abda2606a3f381c64b9fec0000000a7965e4854e8e3572ad5149b3872342dea9252132860c9af1bd5fe263c0313dea5d6e0c11a466d6892ed65f34667dd79b07b5cbdd8aa7dd561a26b5562d4861a7e1b0f48930e0b696ea3bee7eb72794e163d7aeac9a0fa5403ac9cb421eae283b0550f1d0d339cd7b96e71d3ab48ad9d7975e0c9b117f71d3ab80a0c9b0284ecc469fa6181c9c71fce07a6ffb23296a107763138e8d9876291af2076890c47925ac773d95d2ca42acb3e5f3a1550665b898462c139ffd0106bc8a61b6117d252efcab7106b4c3a3c13a70ff452e9d2096142c517b0e91b5cf88332faca5b3ee96363065c3ce32d3d39ec36e20d597e05664f2526bd918090649da11f7299789d00f5024df1e99d3efecb9b457642fe810370ba4fbe00fa60a28af966a27a1659e448bbe43a1dcd2ea760018b57a36ac41ef2051a7b703d55c0602540663016e20d50385766df4dac47802a55bd38dd767ee9960c6daa704fc5d01a1459134d1b9edfde3be9e25a110228c64253588ff420644dbc0854e69a7bdda72f93ceaccf92cfe7dd6296c950db10f6dd8a5ef9b73cf6a12a1ba16fdc7e35b805f4fd2fcff0a623722149c1465e4de2d53f0f10b14c21865027abc71a12cb1e9f8029c7a20000000eeb0d53a83e518c8d2052c08b515d9d0bde24ac4e798040c7db0bb03c019507d6377f3d5dd94a27abc6d6b120d61f772407e0d2cb50d29168b68aef9f176b4c3aa8b21279d4ea9c1f669aa8c2c17d5b3a8d1dda58d26f1019af04b7774c85d5bce8be010f27c5211938031c3404680b01279c778bd1fe1b48c4b5b8e0fe756e54a8d76b7cec5e3407d93b4eadc446440607de844acf5524a4657e33af2115547b735b57b5092d0bc8fa6acb832509abe0882d570ce400aaebd7baff88526608d6991aac95751671174129457e4a03aca69d82b64b89e6ad6ed1e275ec5002e48170e4c7b4f3971481098dedb88fba90770e44bf404d5a97fefe2fe8e459fe45933b78c7ab5fe985a480193a20fb07da1455fb283df68af569ac82aa6dc703e29bf158931fb79f2abfa6ff7eb8c4f381c9da58bea460e2ead969933e5391970ca4fddd64da2e5df9c4d82044068caaaab771b37bb06bbe673056d849825525f1120b2250f6b8520381f7a74b1c687781cb6b23e67b918844b83dbaeeb559ec8520d710dd6d6b4e64838bd434a36ed03fc0c488b24571032ffbc9f8ce97041e1bc4729d539358dc9599c1266b9ce2cb6dd0ad57a6e9d3d4a11a27f70b2934c96237e2ba09c58eeda678d4d08b6da99b7a86e946215afb1b48792fde54492e306cb5342e2589874b603a1de972b1f09cc350096f5c3e814118af9ba0793cfdf20c77b34eacfdf63ce59ec4d2f867bf884e941559b068d908325667672b5e1cf71f4829c0493e8b141489ed926b822becead7a0a2b4a4c008ab16b616d60f347e4da54f06443507efe57ea62399ef4eb11b2f559e1b056456a53998bf1c6d13c92e75136147f91ae3a75ca15eb1b51bf700b3c0bf54bc3745ff313c5e75dc66386897f6ee45429371b8d0878c442ad2fe9ba
f85c1390da13efc353ccbef950c29f39ddf436f0d9bf1be1515ed251d8b6f11ecb16b1e8d1ed04196e9b6c2f9e068b7749bb6c1f533e493f22c901662c65cb761dc2eeff2f698bd4dbae83e2dfdc4f1c7f918a00515c1bc189d10ec22b35c92725cbf0ba244fd029c4f026f68e000000060000ab0476c3fd7f7c1e5c000000000000000000000011e43e39d3f4394fbfa13c416b1c443c5e52eea726491ad75100ebad7c6d5a665c59a3fb158e43da904f19e7e8daa4e90390b8da945f6cd78536c0d2be07221f85ad46b180f256d4d84592691d15d65896b66b63a46705338b67b72dc1c3075fcdc5cbffb0366151632ba5be8ae815dfea9fadfd31c473a24a73d3e5116c3023b3563c72d26fbd59877132bde5ca4ef8d92fd3613c768b35223f6fd0b5e9a8b98cccf1e2b4612e620e3a159d6365c9045aaa826aa0ee6d26cf0397ce674c20824584b464ebdc2f3ea26a7aec4570b242a6677a4e9187f8591c3a9bdc0000000000"], &(0x7f0000000040)='GPL\x00', 0x4, 0x1076, &(0x7f0000000300)=""/4096, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x70) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000200)={r13, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, 0x48) (async) r14 = socket$inet_tcp(0x2, 0x1, 0x0) (async) r15 = openat$tun(0xffffffffffffff9c, &(0x7f0000001f40), 0x226e80, 0x0) sendmmsg$unix(r6, &(0x7f0000002000)=[{{&(0x7f00000006c0)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f00000009c0)=[{&(0x7f0000000740)="0b8ab99004e3f31defdc48d3025abdedf7e1436310ef51cf44ce8b14ed458eee091eb0bfa9ec6f0530606ae03baab7085baef5fd1ce24e4479139dc4dbf086dca9fb731ce0385a7d9bd14d72006b8e72005e1e71f41dbd53335c37b2ecbe7941cfdc3168585a4057ee4cbebebd0b2af6fe0f5129031279c764d5097dfffc950cd7ee68834359b30362010e88f39689a19c66eeca0764ca6ef31cc6cf5870f9995f9eff9e07879fa9966431854517a5812b3860e1320552927cce5a5cf4bae4b92a99937e06ae1d5cfd71171d362cf2298d458b1c35eab25d61ef4f1355", 0xdd}, {&(0x7f0000000840)}, {&(0x7f0000000880)="c5d23dc36fe0e908e0409c995a363d052e8fcb37f0a7e11440e469571a44f62eedee9bc74f2a9e8f59bd9451dc2ccfcad04beae22ddc912d0ebf78d2124f1d3684a18002288e5b23fed6c08d8e", 0x4d}, {&(0x7f0000000900)="c66b6d33a8857c4f93fe523462d8782c37954f8bfca42bfb0e73762788d9bcaf89401118b776dcf3cb8e876a8fe7adcd3757e11d41e7aa", 0x37}, {&(0x7f0000000940)="c3ef119ec808f72ec4094b679fbf1aaf2cf38e8b39cd24a43f3c7fb2c8f302cbaf342da1680f6165a3704b47ec0e6e18b3e48aa2225c7b606b86e4c6d0cfb77c8e5f41242f7d0af8d8bb4d53c7588e4690332e2727fac37a7120595946460de696bd55193672d1", 0x67}], 0x5, &(0x7f0000000d80)=[@cred={{0x1c}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xffffffffffffffff}}}, @cred={{0x1c}}, @rights={{0x38, 0x1, 0x1, [r1, r6, 0xffffffffffffffff, r6, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r5, r2, r4]}}], 0x98, 0x4}}, {{&(0x7f0000000e40)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000000fc0)=[{&(0x7f0000000ec0)="42c9c677a79354bd03351dc160ca532fc656c65e8c3fc6f9161ef0a747a325aef705cebd83fc08e9d9217d673d71e27dfefb27f27c24c200297385b42d21391719ae3de89331cf25463aca3dca69db5722b556b8d830e2f86dfaa65c22112e8d34251ff3bd47d72b491c190ef2d35571d9ea7613a3585a49b4d61abe3553d69bc7ea515f3ef78ea2256e3abc70ecb82162cfddafbf8a49e84c8ee0647c67465edf50e9cb18e09d6d2eead989a72e68c8382ad18e3ca115f01e3debd9d2d0429f275c75", 0xc3}], 0x1, &(0x7f00000010c0)=ANY=[@ANYBLOB="180000000000000100000000000000", @ANYRES32=r4, @ANYRES32=r3, @ANYBLOB="2c000000000000000100000001000000", @ANYRES32, @ANYRES32, @ANYRES32=r6, @ANYRES32, @ANYRES32, @ANYRES32=r5, @ANYRES32=r2, @ANYBLOB="000000001c000000000000000100000002000000", @ANYRES32=0x0, @ANYRES32=0xee00, @ANYRES32=0x0, @ANYBLOB='\x00\x00\x00\x00'], 0x68, 0x41}}, {{&(0x7f0000001140)=@file={0x0, './file0\x00'}, 0x6e, 
&(0x7f0000001200)=[{&(0x7f00000011c0)="328e8616730e94d95ec24b9c6f8873e178bfada5b3e6c050c3d8ab2137bd5e89358b1d193b640f72687e499af2d4c7e95b066e", 0x33}], 0x1, 0x0, 0x0, 0x20008000}}, {{&(0x7f0000001240)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001540)=[{&(0x7f00000012c0)="3722910fade2a52d513917386f047c684997907911e8504d352e3220d07a2afc768b", 0x22}, {&(0x7f0000001300)="90b1e1c1f7c02e84210cc0f3866836ab070f6333119a4ab800d216a17f448ef4bf99deeb7271cb1780b8014c3fde40f2d14a07bd146b32b6d9a0a072913beedc22f5de01dbd949771ac9525b71d4379d848c1990dd758c74", 0x58}, {&(0x7f0000001380)="c144ba1bb64838e8891517e307fe3bbde227c5172dd75585cae80b1fd6a3c84cf520513f2e2ec703e64683782d98c9d5f58e6c2a4b5f5a92cf7aae65462093b29b09f70cd08153a7d78ae7c5aa9cef48aa8a4315eda93fe4d458c94038b9fb2f279b9401d404f4cdc8b32e128aa4088afb27976308a4ebe5fba77aa08d13eba68027ce1279119b13fcfd8aae88a348", 0x8f}, {&(0x7f0000001440)="c565ee8b3a54b1d16810dfa319503e24b9bbbc78c23d66c12153e4df62d8bc649b4d83252d4481ec5da2fead7befd8e4bf339e22f768ceefdcdaef63da61fd2e0d2e05a918d2ef89726ff760bab4ad3686bc4a267b1078dfdf2f81bc6df3d2d4c6a5d7999c4d94c478a6257d823ad13b7d6a9c474a44aae5b9c8a6e2c6b31e91af0b379e82856d4ac18c49c2580e99221b868fa0404e15893353a8093972dc90beb899b5bd6f70838968a841cf1d5491ed8be7c863", 0xb5}, {&(0x7f0000001500)='?', 0x1}], 0x5, &(0x7f0000001800)=[@rights={{0x28, 0x1, 0x1, [r3, 0xffffffffffffffff, r5, r4, r3, 0xffffffffffffffff]}}, @rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r4, r5]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff]}}], 0x118, 0x8000}}, {{&(0x7f0000001940)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001e40)=[{&(0x7f00000019c0)="e9e32c41e4161ac80cf71fc8619599e7e3d7cf606196aa75158c56c97f1244b4683f934607e09d391d23148500dbe32cf66c3bc3b3fe4f8ed5e8c2b7c0a735d8ae508fd66c7a7014604208ba43277a4ca6a73dcfb845d2a4929284259e4be16a3674b05f189a36616cd1c534f3bcc344006a514f1627465f8e4487e6416402d4", 0x80}, {&(0x7f0000001a40)="f29f50bd9a9d9d77a0c84ff27e307ae4b9441b8928b2a4ecea8559bed6289e0bec18ac42056f773607516328b869d6e315e7d0911b3041d57d8d54868b92de98d9dd133953d90e4f2b37a923e3524c505a605f57e9d3239ebd594b3d8c7c3def9ecd7977d8f7a77d95b37239cfca0fbb437cbd6c23ef368978c89d260500211ac9af1ed37e18388a33f76a4c0b3f92a60df65555eaefcd06373ba1ce5dabbf0c8041", 0xa2}, {&(0x7f0000001b00)="939d80c0e28ee263596d7815f32a55119cd251c2bb70a3e29c0f4cdaf717ee454d34169b4368f944f06b995d2eb11ebdd2c57d23caa9d657d5cd666d93b5aa99", 0x40}, {&(0x7f0000001b40)="18de3d1a090c3248c3c138466b46d981f5961f9b44e631aa8e156483887bccd733d1cea050d28fd1447a8ef885d4886cc33dbb0fbe81267452e02cba175af212525be34eae59267002de2a11337cc21f2715b764d8f5a51788d8baf9ce81e54df913134bf123c0ddc26645bfabfe84d7c34f1b3d11a50646e9766a784c3bbc381f3bc17e7850a425ed259b28d06086e6c92eeba316cade95f350d26854220cea30696661edba53d1", 0xa8}, {&(0x7f0000001c00)="2eed2d06bc4a7c4cb6420577c3f7347ed4943868cfa5e038c5689196045379745d3a5799845c", 0x26}, {&(0x7f0000001c40)="a34d25b014da5df8c04869f6eabe60886f898ce27ef40b830744f11225290ce8e2281cad7d7348792ab5f2", 0x2b}, 
{&(0x7f0000001c80)="6391b92ea42ec8033e2bc025e0cd282b23f182489f7850a35f72648f5b5f82d0794b91b021f45e8d64f358713d8397db07a64ea062663ebfeba258c7dd42b0ebb807b566bef62d97b2d14da9de17b19e3c4d85230a86899d158b7e0441f613e3d8fb654346f52daf1e19881309cc8f5d15e7cf27c5b37c1323df5d497625a01342996e8b8abf8a0400922bf028a6bd79f998a6165c913d9a68a19847978d372d57a40cedadf47dc3e2ee9995f8897885eff2f3cc532dae78e3", 0xb9}, {&(0x7f0000001d40)="7d32f4f3a02f098e56d123568c67a8b7d72628f01dc541a58134d2ccf04c55e2d42f38cf4aecf5b308692cd14681e8f7d2e1bf88e4668890a355919dc8899b3fcf25099076182875cf2aa3d3c83368f7bc2ac97f88c13c783b256f20747fbe5ea6a0afb70d874bf0c1b56b7bf8710e37e01e1af2735ce8a1fdfcec767ba9408d87d9c7ffd44b96b93abf95efdda9751d44010d5ac10fdc83cd2c89e2424c5b5052f7f6eeb84559b76ccd48c1e381d25886368d327e1de296eed50b9232a79555ea3c9d5dfe66ba5ff90a2d0a0ba9b4e5b63a635b22011b9400dca0b7bf17ec1d037a02a0c7a8f2cd875e6f68ac9b1d5a67bea3ee913099b20435cc", 0xfb}], 0x8, &(0x7f0000001f80)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r11}}}, @rights={{0x20, 0x1, 0x1, [r12, r13, r14, r15]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}], 0x80, 0x24004090}}], 0x5, 0x40008804) sendmmsg$unix(0xffffffffffffffff, &(0x7f0000004000)=[{{0x0, 0x0, &(0x7f0000001540)=[{&(0x7f0000000100)="dbc4f24b18fce59e560c179fe1a0e7a051977ee0c3b246821ee70cc5b4874a0fd7b5f20706de65f57d085571d3dc21e249b5a0a316cf18dc611fcac9d6cf65fb73973e3cbfa874ad40e619990b0489585b6b1bc7ece2886b5332bb7543fd4fca01d56fca29c82e8d042e615c7414cbb846a2d769b81fcca44b7a9e2c57fa5cd63e871cdc526dc5f2ce1d46eeed22360d31aec0cc3b6cb321f1097d2eb11616c612f9abd3f9363c20503e8f0bdb622b4369e186ebf9592ab60a7548f727ca152eeb0e362d61c62630c1", 0xc9}, {&(0x7f0000000200)="9b5fc994668022390ede555253a754f67eb9585b161ca1030b9a4a0e374e25bed85a93013ed5ddd7fcce7bd5a58631904ae97eaaa80dbff37cd10cca63a3a6c296391d764f28fac76c13fcc8ff66c88e54c8be0c7f4704845653cd78fcc7bad2cd42daa117c886dc13fc8070a7ca47cd612167639d7e0481b0467cd46233dea07ba7afefef764f38deb3bdcfdb26cab03e340b66d93de2ceb463fc4a079b4256de036a307673c90ce0a65e54c6e5e43b15c6b98330d72031b0e9b034d308c7b278504d7826088114da1c1a8771d42454af2e4f8b814c23a3909a31cbcb4194f39a574a9332b096d2f195a0bb64c8f90d0baa107168130772ba501a34ade1", 0xfe}, {&(0x7f0000000300)="92591ffaa769533f6a3b214975b30d0de86745302fe06013dc26ed63edf67b4dbe25c9dead1070a285dc22a69e9717901cac58309aceb8f39fa017bba29ae63c2a8af5ca56c6e040e84ae7de73c42e26ecc4bea395798df3568de0d431606f0a03739c6feb8166fd355c68d86293aa9a78e696acee93f2df127893649983d97346fb3832cabc8453dfbfefcb5c0b6b63dd718ab4e1c16735c35114dcf2c6ac5026d45cfe581da580fae61a311360f7de281241e6687bd0ea459cdb5c1b56b71736a90238d07a3700c9c512549dd29d82dd4cc10f654540bd4fca5b6d8a6e2f583886aaf79c131085190585fd86b7d8f0ea214c68a27b", 0xf6}, 
{&(0x7f0000000400)="22a4e9c5246d22d7625613407cc09286f565af4b0e143b536898a61683bcc0045d093f8f63a12f43f598f4f0fb1ccdc7fd17f7e11ca892648538c38a8009a7dfa62c6035aa7279bb601539b786c8121d56aa8d049ac2b152597503008428a666190ceda8725f8ef0812b3ec6ce876c48a8e1882827df28635e8ab910ef8c01484394b7fe206c042e79246c8ac690c05ae71e1a952acec3f1f0a445f1bd395660a92392711fa1a58154c0b2aea92e7b328c517aa5811a740f3253a402dc73f94e73e3e2ece62d3ab9ca2a0a6d458ecc6ab643a6a4e8b7d87e9a47813d522b7d7d9d2846323e4e4531cd3c375e9e23f37a1fa581383aac63649e60e7d96bc87381630c2040dd95db5b1b97bf32ae5c66f974e98ace003a33dc5a922e34712daa43bbc86e76fe866589cb8fa1282b5ed8db8ccedd64e312cf7ecf76e517938cb6ff93c8598bd4b7ffddf7b7e9595c719f489a0af55943b51c892dc828412a40192d52048b494590550a72541d6d54c393e1886281a1b57f47f536b15a1f27e26392360ccf7ea4759d4e2c3d37c9819211dd6aea334e15b189b086aabf3a42b9306071a3c0b57c66f94de6bafb0cd1fd22a048889656a0458d693ff8562c3c9c8409126082db148b1de8aba23f2569daa955b64b45769680a6204f77189d5f24a044fe1bdc297d5bfed4b7cc113ec1a50a3c6c6884519ea12cb6bc811df9c5d3e6458efb9747e48bd238c19cb8057febc3599c94208138bcbeed9c01003a0298a103d5d004e0851870dddf9363a86b71f670550f6641148164b24adbf4e668e8ada237fa594b943c4a5529c613f82719a5bc84914f1063c5a121296059641af891c2ec84cf1ac6658c7fe04108a502bcc70e93014f9a5e81e4bda3196aa02915c8f0ebc8f1e442e5c33d8e2aa2563bf4a961aa2dedeb2636da2286efc7b8d32971b80aca20ddc08e6de9c6c062450355d7860affa75a772db952c3eabb7f3578643bac353108b52e350f957dbe25b3a8e52cea9a89aaba3def7c7365b22fb16a22ece96d4a65e39aaf2045bb01de2d207e886219b006f8414cef79fdbecf781706b45fffdaa3f2442280776e2fc7284106739609a11862589a058c659f95797839363c7e8006af72758917a76e45c8cf0358467b74ed985e960b2460d0b5d3e8ef94989c42a4905ab231ee823993095c62cc48b93293f8f8a0e9b611a07ec19a9a08c8a5d12ca63e20bcb9e89307c081f105ab7752d054fa41e7120e7679d3acff82d702784e20c30aaf4d444195fe02f054bfc82cd4f7116493b4734136e2f1f33c2a03b8c1a32513d1da6f49242a72710cc200246393dd189bb2ee028c861c82298a9649ecebcc382ba6e0580f42c85aed9d2dd7e788fd246bc0ef9ce607173b1b39b9192d20588b1bb9aa2f34c1c3488dd61cbc5f176c4b36ca04f644985d80f02c22ce2d6df18e33aa740990cd9ab84f3df6557ae980ae6278ce9b6b63ac4a18d319e5b5a4804146a7cb8b086f22cef08281f517875592c5003044ce6a6913017323a5f24997334ed8359f925bde255bfc1deee146bc116eb20c7cff66f0a2f1c8df00171fcb3062866e17206a84f4369de92709e8642cbdbeb923969f6284c7d6af58d8e9c5d7207a75b7e1ebca1c55923d438dcf8575558b422baa7eaca5284f1814c11042678d5ac76c684f88ad78cd79141b8744473d3233d4631cbb355e90921aebc8205cf8ddbb9404494d2b152bda30a76ecf8adc44118181240bb945b58e1219cc4c3bb3956246d8dab179e64826a54064fa7b110a8063025d327b5357f6fa7cee845611f62d4af9996fe48bf0d37d455b2744521e66f33e0832d564846eff0ba91e9043e037f34f1448d934b35cc39ff293fcb4ab5afb3c72920fb85dcce7d8b13676d780020f72eee9f2751a242a9e339686ac4b960517a96293845ad7c1e125693c88dde2d3a4406c6338fb69f86370b9d0bcc725039099a4ba30b51df0d67b5dee44642dffeb65a2bbcb434b9df0377fd0fc814a2b8bcada5eb4f2d5e2d3e782b663d7c4d53506e078ede182ac0cb9b12b1d9ffa462d03e990761e9fa6ba5aa7010025c63a95e40ec0e0d5e3636ee50a076e07533567b049d00218a328516b797fd82185a780841560780ee262092b30dfef26d79928caceeea3e900e8486678cc558154a58d75ce3e3e537c5a8d5906200900f8bad0151c996e2139c068129352eb3276763606631f72925c568709ce8a50327c1dfb66506312eecd8ae3f87eafb55fdb8fbc00ba1fe6d5192bfe44fd6c40dc7646a8d06fda1e98e952988fc2c930215427d2b162b1d17bb051aa61d3cbb556784e36518a853751e01f9de3b1007888522d73b367943cb2aa34772c0e22ef74a9fb6a5578a42feee83de6886acea93120dc876aef365bd1e0b62990f8f0697053c43c26a9d241cde299cbd8ea13b18cb61bb0b1f8202d6045a98e1f8665c0b9485cdaf769d596902dbc4c34bd2b8
1e4be342031cc9f62405b30f8d6e1ce0a1a4c2f252c293eaf9022a3734b92be39b79340e8b07747bc1dfd1c6f8dadb5fe8855b7c149ab2dcfd06d6722adb6fffdc494fd031d4032c44851e02e389d7097fdb4f89b41bfa8d501a57522d3448120b37855f8addf36ab39930deede738fb936c2ddecef7c0e77dccf9619839b0f636e2670e017b4facaf2152eadbfd414e8971593300487c736407abbfbc9f116abec4ac7637f2bbf399415801230df66acd2838f9f05c21ff2089a82e6aea2e5a3f1841cea5348eebf531987717c0f171940373786e9fd74913292c1dc92cd81ce4c43f705a989e2cdabc5450a85a8a6dbd68f47b4546c28d54b501c25ec0dcbcaedb934c27db33418e12e1ef4cc05b9accc104afefb90819237222f187a8c45a5cff0823606a5161f323d2f5d2477af5e33fec044d241ee655cc3b877178eaddbc4c3ea9ccf0a8c02b333dc4d2db4240750f9fcad4dee434a6871a24ca7b875911da5347cb1a238b6b864afa053b334a8f84ca11fc69c8320ac21dc7366aac8110ddb5053bb2eba61f3365df020a3a40f5c28f785fcb1231b03b27e414112bf475e0d4b9a9324fa5a929602c4ca5342492809462eb3e33ead061ef97890df95c5876140905a25a8f50c3499c7e41be3c21a382cec592d6786a8a5b11633463f396d6709e9e89bceef42843d23b05d82d8479a7949902fc133aecb6228bc85202f2e0403fa1be9a8db8080c849330ffd4073e9238a3b21e223011f14a65e9aaeebcda997b4ee32eb5372ee4bc0106bf5b5f8942ca8e0d9e7e301cb08074738293b144eaf723cf26f4fab0652d7fe7c4f6723219ee205a4a01c2f7080100b583609c685192d7aed5083a69969782d76c38fdd4dcb61a6ef51c9d95bd7fb3212ecec3b383552970e58a11dc99169201ca95167c59d99fee8fd8bce5b590ba620a168a3592c15c62f756b6de4e1b7deb1a9bcfc57a88d5a3cf9fbff07b9d6094b74f6c27d01f30eea8e7741130dd7ec2099cc87654e2f0c689c85c5a24e1c63f37e51e98a1844a153f8599cd75371ea482ed74e01970e71b0d10e6afd7f0615c7086f220ffef9744f6856492fdd93082232f3b3bea309bc3bdbb175026777eb19b3eec490d477d329e0353c889191b576b3ee22042f81b162c41de634cbdd8d913942f8f099d532c2a792a947d68eb0ee13ffcb5b0304de64c8bba0c72b3ee5c9473d2058e41b963abc824b4e644c15c5040219108415bdfb7f88a6d5b1e305721b927f5f84d106bc0f830609b9fc72e27020eafc7c07948673b564d640a8f8cdf76d1452626ca38aebb1bfaba986c0cb28f162a0077a030a79dedc1030896f5342c76130d3593092f2b25faea80c893df46c42649dc88f920a3b991d877481b531a7a8d8b90a5dc7ca90bc22936ec00702da7ae01015f5c46efb0b1a189483aa908de49a9b2b326b0a63a46cc24a5a58113179683eb1dcb2fd6f7542a76d739de7b955f34efe2e6530e7e50eca4b92cfdbcd26f17d2da53ab089dbee9ec4d7f5af21552cee181c805206a123587b3b106a81dc72392904f3f6555d0b709214d11d4104cf551eed8740483c69105f4ee209bb2644e21f3d785dbea25ff4c3e6c3b0d1abb1c47247a94ef276f196c14ffb271427ab0764abeea0104f0f1000349b1e856f9a6eb8fa04d51ebbb6ae3e2547351a8fb335671144a71a223fbbb4dff6d4ee767bcd8167548d1f7746895eff32e320142c927a48815bcc0d37aadb1f752a30a477b734d6e83327042a87a7902a06996d4571b7f35b5398ec162d398f6170a634a6f5d7446b30dce0edd2f7452d668ce48c28f5672c67096b5e43cc96ce4ed4af7a5c4c9b6b293f838219948e0c6a6f1fa4235b67cbeaf589332b2d3f1021c5573f361e809d934c602e5f890d06ccfe22ed3a5fc8ed8ebc0421296e43447f8c91da6bad796d4a3f843b9e78b446c5d1ffa8d8125ac838b64ba3e0fc7801d1ce4bfabbb5491eae994633f0aa3a99b2d3d6db3276d40bc6352742cf2353893d2004c081392652804958259dc1ee479beae1729e4d1dc3bf75e6e6f593076c84557374cecba1cec7f269868bd17ae3f9edfefa918b7de6dc33f5282a8ed11bc5628098fe9936570561d5c7331cbd9095152c217f4bba4a9d98fe45d27f7587981bebcfb21542de0c1244959dee05b753d2463b20f9e1be490dec1176414a88f4c8bdb8e658875adec7b8136bcc0a8a3b90384c43ebdea5a6c25c0af79b49bfb41247d8af8023315ca6181da2ec3542eb9ba567552ce3ef999f6778c2e2349d93b198bd196ccbd2513328c66b84c65475f8f5cf8c486076dcbf812ca812169b001ac54ea607778aaf1ec516ef72cc54cef8c0c82861f0faa78a87e5f8c8a0eabb07ffc5f44ff6261167587fbd0b33be0a1a923eea862d1ca8de55c8b8b2920c899fb937908bd45d0bfbdef74543761127ab9a55940c43fe8c6a525d4a72952bb5a49fc36dd834ccd953258bea798af82fec0e
87fca987d7685570e6c768b8dc1504af9bd97a4f9069594fac84f8c7aaa228ce2037c459eaf92f51004a7ce4d9a6ca4e14bc874d6eddc053756f75742da808f54f322edae326ecfe62ee8a4234bbe008ef801ad411cdabede3b287c64f582157b890a2db76ca49bc4657324ea6e91f419b3f9a9814cb798d396f080f9a29783dc0b2809832dcbc916b92015214282473511b28b389b560429bc0361e800a1fbfe8a90ed362803952f1442f83ed192637f6e7a8fe834608b8e66e05ce6de8ddf53a25bd3d85e3158982ed014fb24a9bf5feec63f3775b2d19f4406cf857de88479249ea540d14579fbff3be4447df73a6560bde2bf298d29d4308840c47fe8c80d13942e4fb010095f4ae3e02c71934ad90c123fc0721c17b0ea7b6b053c7f7764de8f63d0f064f40443f8c654b00169c9debd89321dd62b5ddd1c560ddc7a5c5dfa03a6e29a2d3e97e0441903a9028a1f3e86986e044fbb3327fac1bd749e701883b3b6472a9ccf1c063270f5e6a44f10acdd281e8880edf5a345ceaa2782f5b61942058e121b88b037bba900463e63023193ada76f995d0fc389c8c8ea56170b63dfacd5a4c126a39370af6d39252f76e68cd3f30099857f5dd79716f89bf8e8d22cb989723ad807bc9a3a2611e819b0ea6f0908eb80bdcf406b355c5aa406b81464cdad037dd59c2065708f972a3d6ee9ac175032cd96d8e31c013133fa03cceb7fbd8d27c20b8950ba86ec74ffe5175212f68f662597f53ffe2fc0de2ab729716ef40a4e676", 0x1000}, {&(0x7f0000001400)="3ac339f3812e07d9bd54490ae4468588b59d8df79a6b31aa2fd71f8b2fc0ff4f66f01582676832b8d506477dc75ed4f78e39c0269fb09d9d6f40ef35b5e17a9b378931dcf883da028bcbcdcbf97875b82ad1b287396428ce27bd7a17da0e29d61836906cc74418e9e28f36a2752dfb79efe8380c58366352f31d0162f7f7bf877ad1476eae51f1d0120b9b0639f61cc1fd594ff5ea0a881144430979c6deae282c1f5a8fca5bcd27bbdf6411b0e7f083d93dccd95b424d8f3f226d4b4a776ff6", 0xc0}, {&(0x7f00000014c0)="e91e9286a9f9d68f3079791af825d71318328b80fc6e23f63d7fcab2bdb5ad2d3977962376b57f1eda3407279bd91556a4443976325eb44b0cebdc6d544f9447a9d7a95ffb0a679cd2ff6acaa0b892a1c13b1478f22dcb68a1a9a33a44cdfd7d009df0770fd52270f22873c8835caeeea10239a1749dd0074812bc0d12fa", 0x7e}], 0x6, &(0x7f0000000040)=[@rights={{0x18, 0x1, 0x1, [r0, r0]}}], 0x18, 0x48001}}, {{0x0, 0x0, &(0x7f0000001700)=[{&(0x7f00000015c0)="1d", 0x1}, {&(0x7f0000001600)="23d7500162489addcdb8e0ef36f6fe3631a7e452580c32098cf3313c0a297961ad4c1ecdd9d462fd152a776ca2521a2f6f368d2d3584b567f7f95a44d15bc493e6f5d85a922dcc185820130a12ce988aea2cefed809c001c9cb462ab14dda1b20a7f5b886ab61b021c46b00c83e76de05ad5d080775423a29bb2b025eff86a6510a0f44d3c9e2d8f9474a82b2f2436ecc704b9ee0f4da3cc5bb0d0ca3e3b34d73f53df66c3ed6d57e9e0e220aa143f881459f0abd5da9e792cc17121a9b20545982aae396830f842", 0xc8}], 0x2, 0x0, 0x0, 0x90}}, {{&(0x7f0000001740)=@abs={0x0, 0x0, 0x4e22}, 0x6e, &(0x7f0000001b80)=[{&(0x7f00000017c0)="3261c866bcef0f0738fe676c4661013f5ed839e583c3f5847fd2565a1f2a2689a5b66391c66e9fab6d6c34558180d981d3ca9dcd0847df4b78659e635fb959a089f96860ebc076cb5019114bea8e0dc80917df90c5aef5d32e13b24a3918f9034fafe06db2841583e3fef80fc400180d45a62682f62b41a33a5fce0216d27db63f764a972e51944deafae86ed24fa66b1913fa", 0x93}, {&(0x7f0000001880)="ddcb551c314ca6e28c135a5d99b874ad7e3fee557098b239914517f36e429cd44b89b30147952c74ce74", 0x2a}, {&(0x7f00000018c0)="4c34aa080a1590198de21f893039b7cf58d73ce929fe5e1b9b03ea82650b17f347ba96f9bdca37d119261c5d5e6e770cc1f68ba8851ab3b460e25df10682d398d15a578748c8422669aab98629a01e3e35bb0b8456b110a8c66750c99d115d4304863c569646bae6cf4d4cf58f2a4e5a9ed6936b1cab0a5cab6456a24a74fc3d665164f5ceed7b9e40d8fd8a60e89cb67572e516c9bf2449840c85066f78bbdb68c2103a46fb825a1f1c79b2cb205aec986ed0", 0xb3}, {&(0x7f0000001980)="07d549102648b2eb135e07712c223fd96c06b3556edeafbfcb697cd4397b0dae9746073bab2308692f3e7148707df8fac453bf79db66427b5257f3fc450943d3d54a49f344665d5e6f13814d", 0x4c}, 
{&(0x7f0000001a00)="139e2e3783ac11cf91fb0815f84887a9bd200f75164cd5a53255fe80645a2356095833eb1b7a3ea507a6caa36ca98f6323dbd8c56662ba8aee03ebb5ec25afdf71e2b76e4ff1aa94d2de83699c9893bb20089506d06da6f38189ff21566520c4dfc56998713c2b67173d1eb4252fee5bdcdee49408b57dedd1398730982e74a73ae875041e4bfe40db1fbbbcd84388f3bbe0f3f811b591bcc2b002736582ed4fc7269090e50dc46448b2a05e2cb795b91952dd79494203578283ecb3060d7b6853815c56c9eaa81c7d88e0664e50789ba578d0666dae0e67a9282c543c4a04cb26c0e3", 0xe3}, {&(0x7f0000001b00)="732d1f48c004203e43ac0bad0f8cd773deb6f15bbf097712c9ed41cbe40e490b52ac0385f109ad8f53de43e2a0440900625145db9337678c9571a3a68ee1c4bcb8efb92c31ccab4125", 0x49}], 0x6, &(0x7f0000001d00)=[@cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xffffffffffffffff, 0xee01}}}, @rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0, 0xffffffffffffffff, r0]}}, @rights={{0x24, 0x1, 0x1, [0xffffffffffffffff, r0, r0, 0xffffffffffffffff, r0]}}, @cred={{0x1c}}, @rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xffffffffffffffff}}}], 0xf0}}, {{&(0x7f0000001e00)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001e80), 0x0, 0x0, 0x0, 0x4000000}}, {{&(0x7f0000001ec0)=@abs={0x0, 0x0, 0x4e20}, 0x6e, &(0x7f0000002540)=[{&(0x7f0000001f40)="e8d2c3aa1df1032adfce5660b682c9124dafe604b8e4a2bc2ffd7252deea621647eb8a534511086f67b5d22172fc11888022981b8f991e38109db1789767aeb03b2f70fa8e201198850be5448582a6d66563c7744a238044c526281d4d9450cfb083b967afe305d7e3c928e981e551ec090c1f95fe9838e58dca07", 0x7b}, {&(0x7f0000001fc0)="d5762a1916f1629d709a205186c40bf13c650ba174f11068e0bb6cef331350f4c5ba98f635bf9d833612b7cd47d90dc04a8bfaa0e9a0264e8dacf4469caa7f048e541caac3bb8318f30494fda66f95ada221a40d613f8b398d6b1fc34728b60bbe0413b741b8c0e88927b944c27953019728bd32d12d5ac10749fb3a1c4ee35da8c5f95ba2f4fc8b9454f636d64c090710423ac17e37a815dbe5c2", 0x9b}, {&(0x7f0000002080)="14b6f6", 0x3}, {&(0x7f00000020c0)="8db96c3112b7496d3f825541991555bbe19f7394b82d8c3b36b7254845f4b707f2ef49c150ba8bf81dee63f6476f6e82acd5ca", 0x33}, {&(0x7f0000002100)="6ad44ed168a0d66d72f80e6b05e8a3b997cb8b2bdcdca2be3a3c7cc6e476a7afc38aaf88dd89ce08df595f83035a069a6744c8a995794b9d05af913911", 0x3d}, {&(0x7f0000002140)="ba40ddedd0d4312c127b6e9840a85676a1d46c2ae2b3dd5d6711ca294d38d944a0d203c6a2303175c5f62a94d1398c22ca88178ad68a660de6fb1095e49c03882d9094df04f0aff8743ff228788f2fc31dcdadea68ae928b387d52313c13cb4c6b3b9c21071d52099443cc7cb0e25379c251eebeef8a37fcfd171ed81716de60a904eef5857377d2df1dc1b0baf73a99a16b5f3952dc7651a741b15b16a5694d240436f62cd16a8c20e13b8e00b1968e688b5b315e422d4c756a3858c9cfa5fcd1ff8b790107712f42322cc9c6cd3cc50bc034045431b3231c59fcc74fa8a4d3b70a1d6a153ef2eaa9c490c2d7ede50f6596ac2284ef12", 0xf7}, {&(0x7f0000002240)="75ef1c2193163719bd5f11442a87cc75aa1f5a230d8102a084626f76aa978fd9f0b8bbc0ec1c9dc9f2babf33838ee121797a0c520fbc0dc8bb85abf31c3a82e58b292d091f19ecd1e8ce25ab019026fcec7233553623dab454a0d14056bb9ae9300291594a9bb251e1fdde2d682a816e3defb472268219b2", 0x78}, 
{&(0x7f00000022c0)="51a84ca97ee3f2cb3e8e47c05efc6ec37ede54a7c0d3a2f10b1b725d507383a228951bdf405c4dea47625a5ce894164511e08908e8fa7244a0cd2291e67e89f8c4fa28bef10e954f07fa91c884da3e8e070826a7185e35e45e249c33a84c9480124aabf0c069d5dfa2b3ff99c8c448bb4ad88957aa1369f9625fed40f78c73abaf5448a58b6304568f1b714a9c4dd16acae4c9561c67beb16ddc1e0f5407205558b49909794a7432406077555072ec0345e41d8f9e44f5a05f7a84e729f5cf9661f1bdca5de75ef1b402d347bd51990f7afcfc94e2688e8433a3875eb47b7e477148", 0xe2}, {&(0x7f00000023c0)="36fee72f4cf163b7a12474036c6836098170aae8d0eb701f7bc3b1bc8ad03c4c978a278eb6c7541d78b7da14c76e43d412b52c586058d0d7e8d2f8298bc44fb756b73ef4934902f230777452210a5bd269d5dc190b802709d2fcc4d79c19065a1936860fec385e2e0885a64acd8e6b8a6ebf77a381bce3b742c9c8c604053ec6f1764cc2bfcea0219ed108f8e429c8b27124cd1a3b546af9021724319c7fca09062a0e62af83f177edb845163c7429e01964b5ec1e83fd", 0xb7}, {&(0x7f0000002480)="14971659348f75318e5669ad1e9acd0928c787f7cb364311426f056c388a55c61eb76900778c65949e4432318e05930b0e894c9b825a1fbe4cd7c36769f3dfd68821c9267409d185b0174adaa11b148cc372f8157be9d79121f5b04f92e819109b1f9bfb4127c23d3197056eb563d93bf6344a6cf1b5c7a4dc374bc49010a224aaf64e4a53cefe3fbe4c714d2ab8a50e7df9bab2c13c2a890d75186dfd569201b8ca0dbcf38bd5fb2f26b5be", 0xac}], 0xa, &(0x7f0000002600)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}], 0x18, 0x4000041}}, {{&(0x7f0000002640)=@abs={0x0, 0x0, 0x4e23}, 0x6e, &(0x7f0000003900)=[{0xffffffffffffffff}, {&(0x7f00000026c0)="1168e28167351c1e14a541fa9e14c46a25ff5b6b3074973dff0f482804bba468c47c4acadb85406a6f00e57c3f3efb663876696baf7ee607d7e35bb41dbebd7293c45a8fb3f39d566a2f4e1b737278b8350c23b3f66e9f35fcd21011f640c0ab8af65ef6fd3273291b783fba62d1f226e55e754e67490d03b7c549a06ceb2043dcc882f38c35592ea93c6ae4770b028ec65e0ad2826c069f04d41d567f3d2a99861d639cab1bec98aa32709ce8f114c9d913b858de0a9282e1aab19ea6f3e32cbc5c827261d4", 0xc6}, 
{&(0x7f00000027c0)="37ed7fc6fffa6af83a12b85a677c259f7b0704243244c574fc80e3001125dd4dccb86db4fd3f691ece725c5f75e723632ab2ff9ebdda4b1f04aa925d5fbb1803b21d30d36c6a631dc9a7d715b1370e4e8b19931ba86f96d3fbdb7576b09ba10ffbd420c41dc2ff81fb035ab613e36e73f5a72f4acea986d5f7ee0cec6c619d4aa1ac22b823fd84630a111c4e8193bff43c3847009c3b2a2959da3def4ad9983714f642ad760ac9b1b404c1a58123ea16d4be55f6d6fbd23f4a13626deb46b3d064dae1b7c61fb0b90d56d95d3954f4fcb265316c6fd639d2d8a079d8eea490f5cbf3d4bf80b4b7ff1abba8cfddaaf22bb540fae983736fe9985f8f1b46748661b733d268cd68ecfd420d99bc3c75ec1632fa5f78db7be4418a5c0769a9f9643faf16269f20746c87b22d17e86ec788b788c3ecb820eff530c3aaa9a72405f8c587d4a94ee910a94a5fc2f704aa8974be112f17ea852da68da6d71def83af8ea14f43e4a1d79bfb6cce2a012a3481a4a7c0250222e1a4054a50a3b0ecbdfe56465fccf6c7c1dbf9030ff4f4f72e8741c351e9533d63ea0fed458b79516b9fa7643536d65bc406602a85c40ab28688d8a87f3cfbfd52decf46fc7fe047e6ec93ebfb51fd94f13d3349a9b5c3e053f31e962c18f79f7438b375139d0accfa90be234bced8a4e4270b46f58c96e87723a16305dde914802b3fad5e10b9da4306c56b6b6fa2ccdba959ce5fe7c7d9294a2b4cab589fbd881145b5803599cf166b1f298116726b7a6a41424b5dde747049fd395c069663cab9fb89ea4ed833d50671bb2825abdecceac809e91c89b43aa6e7a277257b9c3115cd06ccd124cbd5ac2fabac90f616e5f026e1a05f31846ba84e09a8546816e69d10f7ecb22a134f12ce71602ddaff624bc3d4bcad0325d3e06180f95e6ab3a20dfbfdaea25153b2478af3e181bdbfa7c707d8941ca12a897aee337ec9527ecb5bda5c05fa3bba4d2897d47da00019bf5260ca495735f42a546088932b9bcc3e77122a37b5a22f42f13dbf434fd11b9cf99bbd8630ec8b5fb984fb5b92ce75df0f506420337be1c10f5bb659488a5f1f4addf4b21fb53afe2d1d9a29c06679c7aee9a3e1c254b21fdc1a90268c0f66dd39ca530c5992d3230a68d18eb282e141b43c19e2c535cd19013ec92c30c6171882b81545921eb6ddc83226fb220e2e523cd460687937d028c1089fd54a8d48051e130f13668e6141c1c361a60c08d191c931b13ea8c9db7dd0e7fd20cff4888b5225c224039b99dad8c957354ce5fbee577c877f6655282bcca6e855a2ad53c44b575ac0a5a7fcdc27059b23f86b42ed4e55aeb0b10c24d9171c3e76626a2a19c6c7a9860724703bf2d057f97fc2e2bec59cb06ef943f2f2819bbf5489f35ca8377b221e2badd0b683b48ea9968c3405abe6e149ff82582b071eba3131ee927c4039d5292ae502699ab7dbf25545dd3def90da7393669c27911918eb275558b6fbf79259a4a2a7a53e7347a81e2c849e90aedef016258d63e22db3a0675666084dfa32a679adacf1bb6444a616454642572f1726fe6d61bda3339d969defead8ab0a33483052611a530db3f7750716594ad39344ac36a4cd60ea44771877833bd30c7d0e1d1c39247b094304b733bf4286dc0f11fd3771a04c93f27ce02bb2e5b76065a3081a90ce126e1d0ed03fde699152232ce5717ca80cd2b22c190e51331430f50de23678cc2721c592de7951c729beb35cb11b74830a643085d47e252d8299595e721f17d35402951ebf3d9569a43429c5707e5f1840779610665534fc0c847afdea2a0bd3193a9c8384b0b9ee569c3ecf65e4332a006e347bf36f052f9ebad931a88791f0871aac4486f493d0c9252f48fcd4e599e3fdb696793ac97d394e350edda18546e8dd89466edc367659b21a65215a6fbc4c71be8c31756490204be236dc396b8fda3e5d5efd17b698c24fecf59ce3723d35e4344006dbe54f249de0ecd77f5aaf353279f7faf9b83778edef4343ea353b63443674cace4c42fb9c21a22fb41539d3728f148b0f7defba18c9dedbd94351d7b144679b1944fd7ca1658abb46b23d6c6d78be35eaa01a65c56e9ed7bdb348b4254294876705a3ecece2b7f74af64c4d62997d6bcf6d7ca90919c0b39a4ae3793d5737cb1302e1c0fef5cf6c6c5b871edc9a8e7ca9438251bb352c0a0f0423ef57b0ffeeaec980b5002246f4d62026fa5868b43ed52dcbcc4e28d2759b7a138c466f3537e39cca4bec445af5cc8a684e72a64c23ae3f74631a97349136cc8c65a20fbb34015877af2fdb454b24c30974276314d20eb35420fabe759d7860fca8f32984b355ceb29054a890984489b0b07c8c536d8b7976fd028ef61264e33cdce8d3e14e61020631aa9368f1f0dbf19fa7ea4cb91111a47112c2200f9171b96d70092d0375e829f9ee60f9303cf3975e8ba8ba6fb2738fd10fe512c05f141328b0e59135b8267426e2d
38bba11e12e52f82af9c847a98b80fcfde0ee69ea42e03fde7aa81eb524dd0fac21db43695c52974b2877aa7d9433e42bda40e8156759d009b39ccddc57b247c9176282dc2c45b4e98f783f76f33e685cd46268592b8a2ade3c326303af6525ad9bf28c6ecf1521642e73cd4d41fe75b1b5d955e5cbdfe4820da257ab4cf9de18815f9d09e6c60125936beeefaf13eefe41297e4ec477a9a35683d73e510efa2c67fc2f69e7175437b078472597d7bf9ca4657edd6cae6713068d3608b3c432116b9897abcb19662f42be84d75bb84c10b5c260ad27625e331bc11ea132ef54489e597b204cf7cacf0644ba0daa78e5e4fcdda37a287b93cbaf955501982cb1fcbd911a0e4946a2a8df8beef09e6115261775f04c4573886cf4c71489a427a29f2d9403a2bdb56f536a8a250d6f3ca6ed87fb1e95400d97ffe742bba7f935996ee621f32682d5d28e3e9e3de7411c21e5a0d416cb9051874871f77de363df2120541e58b5a68d24a74a0b9fe0adb97ee560353c70e80c6d59304275ae0e3ff4451e2234dc03a87a72feb6e6dafc072e0b229164c0f83d77011442356fa5af3634f3174aa4491ddf5de12cb1a30aa037cda1037742f52d868a6f94151170841c199271f0e62cdf55b437b5cdab6cce5b67a2a26523106d2922634317fb868b94915bd98669214fc572062a7572c22daec9e616b977c93f204e835b126c9dff21767c4166532ba508e6383b0fcbff9abaa80e217457eb321983f6418ef42249f47e170751e1ecec5dd42605d9b8fbb6eac2afbe06bc0f2ebe9c57df724e075d9ca01ec3eeb5f3ccb6516f99b64a78091bab2f4535074f33ba64c3f7c4b10e24f0ac9fb1b6441d887c8011b9a009635fdd020ebd099a95e56c9e5068c0c4bdbc65a0c1a49f99c3805796d0dabf09e2f26ace8507389c36d3b22d0fcb923d8c43d1456ea56d9005ac27077953b928ad0d0b0a965b70762f73fb1b90452f6008304dd2bf2e7146581792b5fe3ce15610e636d2f3e757bcc6106162568f601aa44be7ddc740ab4bea1293bc8d5133c60f7489d5bbd9274ae1cbf31cf5dd68c680bc5916c6167fa08f1d77d1c7ca81aa445c9bc6dcfa1fd20292b90e746e43c38f32ac2640a5cd407152615b417a18e9ad6e5aa94077b579434a20799644a70ba84fff881c2ade52b3dafec5e112ed1dc7ed41997dc23e1730879765e6c49a4e772a05d83d4a547f4df1db9aefb2e86313244a072290fdfbb7a5f570482542443106ff0a961260c6500539d62875752f0063aaed9bdf3ae0a65a803ed98c82b93f7b6150f0792dba710961f1cc4db3d459ad2871198470fde28df598fef7361c08cf2d5f28ef111703531b45263025dc1a8716ae08fc9f5a52c671c2f774ebef96042b54f427c35a64418360183c695d22d3560a6e2b01a3886113a7ffc2c0d87645a97ded067e7067c028d448a6817367b889fa193d5975ce15a8e665dddcf22e61762e38cce14bb15746f0c44219b092eb9a4fc8a6c9ddad9a74c49f58769e754100b583c6e1d6c909bc26a40cb76a385718e59ed5a2a791d4288221f37e8b1ce863bedbe31569bd396ed0a1f6909ae931d213fab2a140b16775d1db7da60aad07a2610f7472e9e7157a1b7621de3efa1bc8aec99dcf70f50fa291b69bc47d8c9d04c8b18a952eac5dabf0d78fd7fbc25a26919626807ded2c6d25e891fec2f84a5ac822d0c98d865e0d06519f019a7ce5d13c1d3289a7fc70f26baee7f4e3988027d753a79562dbaa5c419db77ecde101d3b15b2cc4f92d176e932cab7af5a0f318c5afdbbb0750af698dfbb8eb6ee6825040d95d7cade463dcb81a770789a4b48a49298b84b18d61caa7224634ecd8653369158f88a85c13b99d7f255cbefb79c9628f73f2b3ef40560216d21f8513ff17bc8faa11c2a1bc25b2040dd23826916ddefc1e7631c508a6f04da29ec3e8b8af40b9fab617fb7648e23fe4ba20cc965813b9c7f2129312c2b74cf367547907b1e3d9c4edb633088f11601e2d868dfaa4e4d8a7ef834af3d483a3a4b88c6acd37818befa2c6b544c392b472de4e25c73264dbeba5ce6fc9c65cd7d1f411e743517d5920cb81337bcf562c94e332a07bf0684e7df8d9ac36471ae12416f546436dc3b78d883cbd5be836ce1f70bd494352c32b89e736028f083ae0df7fd4a3c864324a20d99e944548678532d4f43e14e1792fea757f0000a903e840542110ba639245c8e5d778fe23c21b0d36caf004b92891d9e9ede9e073d2dae8827e915283378d90177a1503e856c4d7ab95cbbd7c05c20d1b7982ca916e5885803af0b08c861c559345466eef0ccd2994cd245ee6e70c9a45686f42670c2459954c0b67e521b982a49fac07d76110c292dc967abc34aaba9bb6bff02f331d188865dda6e043ba7b4f81577376f677222fd474ce8c1265a0585fff23146489450cc880f8db2d167d2c5635e49ba2da7edca21989a560f1cb09b397ae9745780ad2de4e6929b905
1deeeee94fc7fdb6a6a1a3934a0b1e4d4cbb3499684bc0c5a51a331770d4e8eaac07d56370b5f0ccdfa1781efe91870213ae5ffca24c9a0686cc51da1050ffabb99172f0f26c57eadd23eda13892b63ada2ac181d540c07999db18ccc4b5155556d2170458739f7e54de2272aa1833134998672a7e90e30eb04691171bfd55623ebaffdc5e83abe6600f24ee7bf9b511a10720a61115115201156d3258c8fb769543a27b28dfe731811791c5d4c97595ab3694e00309ee343048a6dbae364cdfbb7c3d73e5a23340f050aff74a85dc6130ac158afe63c39816e5188c240e450d1bb853d5470654494365d95ed80b94f4088fbf69f79ce79641d355aacb6d7e25de813dbb26bff864a59060d8f24e0261f5a038e2bdd36ba16e320b71cbcc960def1270785cfb0033a4f340fb4ad4b0d48f7a769e29cd8dc2c0f188bb707beaa7ce95e1dbcbf8782feab86dccca789f7da3933ea8370d4e7c01d269300ffb4944c217e71d6a0b0ee0ea56245b1e87e19f3c8cd5a70c93547ffe2982ebd6dc6253ece72cac73e61515e3a485a5f9942c3fd8bebb0086626cec3a8220df520d3f7df7787f1fc9d3301f89cc0671ec8059d460822d82effbb9d0b400a9e4480c5705ac264b97c959cec6e0c5136357b3f959fa34ff070453fbb9c12a415f83d1a131f575f384790a31ec2de30a76730b1ae0528427963d542badde90714143fac51698a73cc64159acaa281e69d067001739c095402eeb5adc10ec0a6e06b835087db176f8ac58b34a", 0x1000}, {&(0x7f00000037c0)="a7dafddd3137664e12948dc084463f6c7338d677ea2d00e6b05a39cca70c9e630b1260d06cb5b1812c907311718201c907d7f6847ebc6b927c8d7034c3f41da466b43c", 0x43}, {&(0x7f0000003840)="fff6b050682bd1b4fa1d6d943007718da612ef465dfd5f33217647817d5edf76014ce4cef90ded9be2347e95f6f93fe6d04aaeae6cde4c490f2824587109481aa8b9fa555f8a4dcafaa198ca2702d6b0a7788785f58f790eafd67253fc7c20a19d1451ca28ac687f0d71dd2e61ad163814fa115e8ab0395bc8e619bc5938e34339161b04998b5d32029a16633f6e82af23f5ff5075", 0x95}], 0x5, &(0x7f0000003a80)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xee01}}}, @cred={{0x1c}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x2c, 0x1, 0x1, [r0, 0xffffffffffffffff, 0xffffffffffffffff, r0, r0, 0xffffffffffffffff, 0xffffffffffffffff]}}], 0xd0}}, {{0x0, 0x0, &(0x7f0000003f40)=[{&(0x7f0000003b80)="70b377c70387f644a405f4f2e35e721b7ba54c302fa4cb17a4bc8766e7374451fccdc1a1c6078e2fc17047", 0x2b}, {&(0x7f0000003bc0)="e6125e16d7a8df93b40f4fe2a2f1680aa50a616a96a529701dd568d52b5977a804fd8c0fd5437bd7e61314cb97c9a47e06edc44d096126476b022ab6ebb41765134232aec9e0ceea89ffd2787bd9f8e3", 0x50}, {&(0x7f0000003c40)="d6447e435b1de4c192b66a27a92b8b7585e864a464", 0x15}, {&(0x7f0000003c80)="129d885b801261fc14813be0a34b56d6978c91ce3f", 0x15}, {&(0x7f0000003cc0)}, {&(0x7f0000003d00)="eb19896a32f2d21ea6de7080c4d23e451c283764f16ca7ca0da11ee4", 0x1c}, {&(0x7f0000003d40)="f0b456d22c3170eb7297c7b2f3d042c7b6b39636a8a80935cc2c3e0c455da9a259260c91be5e35b7d8aa172375304515e081cc995206c07b9c77188a9fa4c077547d58994864e8b45d1378274b33063c1409a5e9537742343a604b203d606ec24b9694bb4bfad975fbb2c37c70137ed0e4818ba986a293ecb1b946193da9bcbb2efc24009ff0ee098d6df07ff2eb7c39a0eee42faf94e96435176ad6200ed15203ac993743e46a80736f7b25c69162f3eab0b453d787adebb9210b22ba11e6040ba7ffe97d53d4addeba80fa29388171c595b0c5065273f3875a254c0d9dd579953fda006e7cfb768d5abb3b6531116b9f51726d10b840", 0xf7}, {&(0x7f0000003e40)="e779fbbd622064ca2c25a9abef22da29971cec5f862e42a2d40ae66af52a2458448f496655aafedee5b7664ae8c21119267849cc7788ae2b1332e890d866c36da6f78e6827c1e059c9ca969647b7b9e8fb343232cbea9e39243bbe3fa33ba32aad6947033b832de2a25b53abfb95231299b7e640f5aef240da130e53c6fe9e162d555a223bb24500319269212dbdaea89f718af02183beb5f268a5a5e4e0edebdf0cfa9737ad6844b9fbd46c1b4f4b6d5cbcd3eed073180a3ac9f8b42c4aa4c4568e84dd690bd27e65c3a2ba299f54c3", 0xd0}], 0x8, 
&(0x7f0000003fc0)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r11}}}], 0x38, 0x4000000}}], 0x7, 0x20048000) (async) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r16 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) r17 = socket$nl_generic(0x10, 0x3, 0x10) sendfile(r17, r16, 0x0, 0x100000002) (async) openat$cgroup_ro(r16, &(0x7f0000000000)='cpuset.memory_pressure_enabled\x00', 0x0, 0x0) 01:55:27 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afcffd8d06e01000081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1978.022368][T28267] bond1023: (slave bridge987): making interface the new active one [ 1978.040254][T28267] bridge987: entered promiscuous mode 01:55:27 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) socketpair$unix(0x1, 0x1, 0x0, &(0x7f0000000000)={0xffffffffffffffff, 0xffffffffffffffff}) pipe(&(0x7f0000000040)={0xffffffffffffffff, 0xffffffffffffffff}) splice(r1, 0x0, r4, 0x0, 0x4, 0x0) (async, rerun: 32) sendto$inet6(r4, &(0x7f0000000080)="202179ecbced66d21004d86c4827983d6ec102ae843687184616ff1979", 0x1d, 0x4000, &(0x7f0000000100)={0xa, 0x4e21, 0x5b, @empty, 0xb1a0}, 0x1c) (async, rerun: 32) close(r3) (async) socketpair$unix(0x1, 0x1, 0x0, &(0x7f00000000c0)={0xffffffffffffffff, 0xffffffffffffffff}) (async) r7 = socket$nl_generic(0x10, 0x3, 0x10) syz_genetlink_get_family_id$nl80211(&(0x7f0000001780), r7) (async, rerun: 32) r8 = syz_init_net_socket$nfc_llcp(0x27, 0x2, 0x1) (async, rerun: 32) r9 = gettid() (async) getsockopt$inet_IP_IPSEC_POLICY(0xffffffffffffffff, 0x0, 0x10, &(0x7f00000015c0)={{{@in6=@mcast2, @in=@dev, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}}, {{@in=@private}, 0x0, @in6=@private0}}, &(0x7f0000000c40)=0xe8) sendmsg$netlink(r7, &(0x7f0000000cc0)={&(0x7f0000000540)=@proc={0x10, 0x0, 0x25dfdbfc, 0x100}, 0xc, &(0x7f0000000b00)=[{&(0x7f00000017c0)={0x1450, 0x1f, 0x300, 0x70bd2c, 0x0, "", [@generic, @generic="bb10ac49b6e01b4b5887437b1b833c70a02dda1bc1a8181ff2a01e24cd43c856869b0928985342ba5044cf9293c9237d154e8ada0bdb3e2b50d32498d9e3c642c2be6c1117f98c5d3b02fe6d5ce17dcfee7d9f7d65cfb2143fd4b24763cb726988b32a83bd3b1c31e885e0c44d7a84cecfd946684fc74f869bbf9ce2a1b951cbf759f966353171859f5302ed3fc86f858a09d3", @nested={0x58, 0x22, 0x0, 0x1, [@typed={0xc, 0x23, 0x0, 0x0, @u64}, @generic="bb3e165b0ac7bdabef86367bffafd893067668110d1ae7a1bcbe3736938ca4e6eb37c458a159fea7bf2b54452163d92bbc3bc15147171c2bc1293dfa7a6229dd122c8840f74a8a68"]}, @nested={0x171, 0xa, 0x0, 0x1, 
[@typed={0xc, 0x74, 0x0, 0x0, @u64=0x200}, @generic="8f3a8995bafdc3086ec720af96005d9616771cd2a4e0eb286186722b8b544e288a78ff30a91c334cccbe376d7b25a1f750552333559e796847cba0976b59955f", @generic="a9cbac955ccc16ddd9ea0642da22044866e42c49cf282312d2ed48a15844da1cd8f077f014496e91d86fe88664b1066043945e50861fdf70f9d6923f0bebc68b8341202b1c0b69f821d63121c628bc2a0bdead1de8cf", @typed={0x8, 0x1c, 0x0, 0x0, @fd=r8}, @generic="9bd3fb1614f1cd6c93d93617f0910b6a8dcccff4ed22e398f368e9ec08379ea220f86ab1dacd98503dc3947ce6e3107d462a8f57f69e9a0ad820ed901cdd1e0e0cda5f3708e360e8caa9f17c25a515554f13a885f37a19c95e39d4d0e7858b828458910f31c4255c5cb08271f46592f15f39f432212ebbf7039fb1c2feb0ec270b51bac2ef152d4b45591b73707993fdd8ce2411b1f51bdf37f9e0df6c1ea0f92af5cadf5cf62f5297e947c55ecacb9055a6d317402a94999677aaae918a2b431296a6"]}, @nested={0x11e0, 0x75, 0x0, 0x1, [@generic="eb4d67d1a7d75bc02f3919c92843be72ade48518a7cdce94e712d7cdc1ad5c9eae69223883e0d3842f10014421828eb4bec7b1de5921ee1dbd8bd86fe7a10fb2883cf255a8eca470dd521b9ccf5b3d3263048500fef8d2420e9f803dd346504eaa0a605bf5b411aa55728ec0bb64ab81af383f5ad920332bb44ec7cca44587d90f109cead651dcb70c63c661", @generic="a7342ada65508db7da2dcf8e44b6d6c979f5e95c81611dadf89a8e1f27bc2e59b4565cff40e77f4496fc4d67ffd0f38849f2da21ffbcf169f77d0daa456a35bd94c40fb9ab3c94153f4745215e3034b48a85dd20fe9e19b898ddc5bc61e107727977f82b53f61f09a23602dfb2b5bc11c64f306636aae47d286f57cd68251a5055e189c6d018", @typed={0x4, 0x23}, @typed={0x8, 0x8, 0x0, 0x0, @fd}, @generic="5989fc9f66a535b8c627c8b3c78ca6113c21ee025347e78c7974885d9e477092278896e6c0738330a10b67f1f31940c147658f715c2d5728482b166c4b3379bdeab54eb22b5670e80f48985baefd447f8a6877409dee00de087daf87f3a79e6f31efa05c7b94570d7259b877a3e3646d3065af417b53c282acbe24b229de7d9e9c6de6bdb1abf5f60919086dd5b2d96b750baa52c052d53a0aca65e3e61c222300957dde43873e28820a7669e626cdd1a3e2", 
@generic="bc4a037c5a4e9b298c4358adc01afca49ba5b3235795d1cca1e6ee06be0676d4b6655eb17c0300f198e8ebca41f43e6106d7a5a6071153575e4929c8a3415479c43489cb79ce04700749b9e50895990c02ad6f4c89d852a036fca3f66c1fa2b3998e12dd0ce3bce4f2fe7bd769437aeda0aeb83451d0488dd7603aba6a460893a4f89cd76fdafcd806f10d83f4c12e1fc54f90ac28fe9b9c5c34bedf6c440370f3084e423037b6193ad6b4662c7729c9d9e05686f422206546c33b3e8c2cd2bf801a570b6b2bfb864363a24077b5feeb85b41e7761d0cf089b6d3e5a0118676deaea4fcf2a4c33212379a610bb938361d0eec0115ba61cef4ae383f88d1c25126e62a37910c15196b6b5128779ac02baad19cb649a074635b7ac6a15db1acf53019ae69ba39c2bcd1a08e6bef9475068aed7651aa3e6fa7dff432e5500ff880271bf82e13a1d519b9c80a85afc57ccda56e43227e53167e235c3aa9aec7b0f10052e01656b927b88a316af59b27ee6f2e4ffc5bdc2890b2b3bc067a1f1b9985c76677ed1a7c9c27416a6d9864404ae45ed1dda81941a827bc75ba4fc49e984b1fa7842f1541ba40d140e8e47db0baba8a6abce760ae3fe42bf4b12d6552ecb71b0665acb0e51c167d6d9eac3d572ec4543b0bc63735c1f14c704ab07d0e8112b43aefc498dce1b02825957f4a9b443beddd2fa986886fb3166a3040e87fb307162aff63dc1e29ac48eba147298ea97ba697bd5fd31886434fb23e57737ba27567c2fc58a884d3125b35648d8fc99cc7a48f7a4ffa31072dea7867ef2684b07a33ee9e12f8086cc179f2243c4d3de43104199428c57a30377686e238be3c11b88255a618d76815bf4268e706bb5e920aab5b85158fb3c5f1c6f921c6e6029529c5742b042c1c27ba414268ba895301b9f331b0d86e2fb2021be406e7dc7997e727976e95450988a89c58040ac92e2f6875e6ac4cbc9a6bcda0ab805fa667dbf4437b989da59c6c8f1413b237c19cf1c6c1f689aac9b57de98c81094550add06913edf872b3e6f352506d4879fc9fe08d67bd35a2548286a77364038c338ac2537c62c152acf1e9f35071b093186b1c50a52b835eb60e248e087c9f9a19e6baa17d4c18740292d0ea8989d645d2c35a6ad0b9aa6275a5755cc93f2b04af16407b66497c7ce4cd73295574e963985ecadd891fffb3dd69c7617fa221ad1e0f8f1b172e512534ea83ab92d7af69c37e67bc9a1f1befc95bdb613a091a44b85843c21459b53f4f4270a7924555116b5b0920174bd5d5428bb3f7ae8427c8ba81d338c5fb6fd48a33496bda1669a6bd59373d557ac9dce86b87bc04d701b2d27a1f2fcb55fd0305e452e5ff0b911b7784d70a343fd764b6465f203b1a8dc1435b22b93d39e3a42755eccf43fa196d859b823f3ae19611c45c0ebfc80aad1d1894eaf024fbdc5ae41ec367f86c48714cfe355cbe294a21f0ece615e811af25f5f52227571ad84443c21491eba718dce148bb1edc89025fa9d4942d5ac43a534453e60e95b07e434f67659d725eae65aff3422e5895e2f2f54470998395c5c1d35b65e0f87883cbd1f706005d10f95b3bb78b35b2d47e244dbb3dd35dd12f22d575f3cf8f8677b4649c6e26a6ae969a0e452ff8418e99bf7056605b100a347ba62bc53d1ea56ecdddaf5d2de9c2b074f6aeb2e7426a14355eca7acff2bc4f27d2cf11c992fb93a134fe1aca14fbddbd116474ca8b407b8cb6cbd619ef20d2772e1caeac7e6f43f370ddf09072a3c51724551515fafe1241c3d96808a34ae1f5e72d82b9d69986eb1b4d04ab07dc15664b380d50e6c14888f55fdc640378bbeee928e278d846b6efbe6d6bfa299a4960c2e7cbe42de0c59209c1bba122e4ec698ab1a1df0cd50bc636ad36e48aa34e26e324e15dcbdb62d2c5c60ee8f175eae1f07246ce8119e89ff4ef47d11b4428de3cffac1b1c8bd739d9f6851e7cecab0341f6590871db4880c72ea5b75d3416f1e62fe0d47939f8fd69e9e93755c1a345a04ec08631cdfb8e8ce37fc5dff96dccca60f95e5d0e593c379510ccd01b7fce8485b9da05938f591fe2661d97a22612963a8babd8aedb5cfd006f54d32f30961555269914407fcadf2763786f34324dd2273cb5e85434868f34d59f249a6c8f99079ddb015ac2ac851d45421f1cf19f4f2e5ad83490766f4693090972098ee7e06a57c8b6e3d44e7d4845db7fa4943363c02316b8afbb6d261d9da714a76b9ccbe5a0f92e0184ecf7e6810f9b4c5272050ce0ecc0a2e6c022455cfeff27c056cbce250649982656e1992e075549f737e025cf04e7db3bd97ceaf52fe2ba6a2ed35f170325e4c02b0bee9860b595c98c4bfd152581842af2655b4c008b87875b55dc3e7694f78d3854cf876b8604375338f27d191c24c0f0d30d7d035ba5cb41183051f1c6e0da3b333da56dedc966bb860fbb6cae83c7dd044db5c3eeb6c4fb989a0a1b3250634ff95b3715f369c94eeb
354d6817bdffe0114f82838293bfd86997644b6b47220235ec62bf61d51f632e5beed03525802176ffdd380e6e34d46e50e7fdd3d841ab6d516d1df55fc6904c6534e30922708b0a536224fc521bc240126ca1760ca32c4b26be004ae9ce9fe1d0fb9be7405358c65fef345a8d32499f87c6b776478e021d26e064cb049e1b9ed91f1bd782211ba6ce0d31800128e050c2ae974b9ebafa46e8d51eca2c2b2d069f3334e3eb76c9ecb78fbb5591ff444dfdcc120a9d4a619f9c3fc6b3d9be6a803729fa51f7da530e63b888d170f627c58fb10e2304b4b5d01ade88478473f830c44b6e7dae1aa421b84c225a98cca8a1df182d37e7148efba9f502f7f5b8bb2b796e83552b4dcd39d2829ce7a6bf699dabcb8f36134c8cd38115627ce0608a4962f5c149c48359a756b932dae1a393e8d825f473c47ea003ed0d64da2480db0eea0cc09caf353ebbcca6304d25e5ebf562fe492388fd4fb27dcbc1593e9ffdf97cd844f5ba93326110c5d2aab77957977fbbfcf6a9fce8660b920e514c1c7167fa600b57eca6c4a51620627e49ec7e75e691d60fabd09e53ec997cd624449ca073e96747070be107d48ff9c635b8e702a788614668d86eb07dc72af7b81404512eda8cb4e15d6ca6ec8b3efe74207a38b54d6392784ebbec3c6b0dc2d8a8d65d848498d96ede31a0c54ab142b1a3575747c761533dc92682ba2901b8242518a5b704a9b162e979da47ba808a7f511e8759fff37ad263929cb14e18a11ef9f86c0ab4d7b3f7141f8a50947108482188ba9b54cca223ffb6874cc496fc319d29a39444c4783b9172edb9b0fc2baf10196519053f10ffff2e99e2d2a9aa0664c11142db160723790e9f34f15e65190709856dc8339c17bcc91ec3d037b7ba1a97a3865fe11286cb63d83333a07c86107ad25fe833d399980426022415b234ea28d3aad9aaf87036eae1b60195def1e684af0d256f63fa334707b5d4dae394c5aab80e436fa1a9b19ece9fb211a829f1a39c27e640d442f36a93975156338c8fcf4357d124f5b393e589e96deb0efbb909a90c2bd0378459252a85e414ed6deeb9e69a8cbb5cda07b5ee12658e93b768630671d90f370bc479161a0e6e5a48e9e155a15441fb552561f6453a2f999736bf781d08d7ec78bce43a58e8dc058c40624cf609c26accc8117ed37f1f2fc8ea4f08bbc17cbb9cba9d2ce2cca8f1e45a3b92d39c5a6a5ecd28f12c8fba40596ff26cecc2013943a04c9fa4977e3ec593406c8b2f12081f61542fc806ab195b5df37fd97ac3b58126df40e5a81dc50d612f64df1084e308d853d0dc42614b9810df9bd1f3fd33f5cfe958bd876ac46eefd00187422b19f72c9457cd887f825cec6d2a740b4a199e82279a60ae036dc4ab69adfc334e3d7b079870b3f9cfa02ffb05b5e52dd0c0c641e7d21356341f5388598d826372d7337560f9aa7fba5a9654f1617efbd3142548b833a54d6d36a7cbc991d10e9df084687b65a148fcd28ac322969314e6ba4a11019e6142fcf8327d9433fcb5d236baaa5db9750a2b9db8d397aa14676d0033f35adb97d5373354d18ff60d6572df950694d4d773572842f2e19582c7173ccdf159da3f4520d93a51dc0d11c476084d632a9d2415afb6605e5debb147d135f7d5f8a0e46ef2ecb684f197c4adf30f5704149901d1c3cce526c6486a0e3184d2376246b375ec67b069321276e7fb10acf46f2e5a0bfc73cb68753dd9baee32d6c03b5828054039acf49fe37a551dc0e5026a5ade012a998e4f7db9863016e7d58f2d9a94d9bc048a58dea08f90265107a133add3b92525f5d83766a2f1d76d0a8a7d26de85534f10410cffb4eead8cd5b40757e5c6f587285b57e18821d2d0e3ef0c02b6f77c89624a067a77b81aaa4e3f9aeb0c5c51e8b13c16906fdcfd669fefc4d3ad99f7997c62ca2236cfb43707e9d17d411678eeabb7e801df1c9c7ea41e2caee335cd9012244892f9c4c26c523ff7af145edeb81fe9c9f905f9a0eeb5319d7376002b2bf6c2ab43f583a2ae0e1954cc1ff39ecbc6f59dbd5931598fb9e4310755aad3a82710b9a9e4c47890420b288ec67db645d7a2c61ee488114eb0e99403c5d84ac2171bd4f06cebc30da42ae0036ed31e9b87a074695e4e1aaef7c4535ff2fbc627fabdb44cda1b231fb0396ff9dd0615541559e4b0947cc41c20f109f1353e7d9415d5e9beb8ce0bde999ee24abf4e17b03b8d6b80faff084c0efd5e7805e934ca3291f95414c66f5dd87c6fb00c76979c8467fffba84888e0fc3601be41306ba5711ab9a903d2a430dc00d97f5367e48dee6dd4392e19e3e97fc01d86baae5d10aa6549b9006d0d20e71f73cde7e018485334cadbb7e08f0c7d63de58f211571135a42dc6bee8305b9902eb128da19dd3ff9e7cbfbb5293e6a158f4ec914736e3ad56fb9b12dc10fe965eb9087abcd316ab08b63c60fbf14681abd567d3e746e097a3bfe7d36a806ea1eb86b829659ebb10650796384
e21bad3acdbc46251bfc52079383907414d61b31c5d5521c6b8ab5ec0f62e5b0b9005571270137fc97dc5bd577c6b989f8d6110801f63fd21b2490c8b03b3799700e29ed4f70a4fff87b4a4db60e5d618cfed4e79583c792072b309b04b8f942bae34ed06170489c87b21e563187f8bcea980922fe3d5677666518da8716caadb4faebb5927a7f4a6b704defb5d8927cb96c99d8728035da035857c0246c1ead33df11fe3222ab564698a9269bf3293c28add3c085edaa543fa085d4c448d93ac5ed04b6de698ac754b0a36c9b0b38f6f899bd072c636030a35e29b8558cab7bf4ab50cb2fd9c4e384cf54ae3abb00133685376894af9c6a2af56da61acb5e1744935903f5f0cc84491857dbbb90d19eafc2fd42868ebc7f12d86bace36917a60c10745e413b30ff56eefcc086adae2c3abe2007fa13691261f4578837650e3b9ee4e5aa493e53c47e3d964d18b709f3c94b50d8ed855ed6e9b0f552934bf23f0c7b7b1a1c95b284f586d60ce0d159ab74b5e6e4d0ff40aaf1c908bda13bcec4fdada719c112913d51a7c2767672a3ab6a9e94d9da3e6f61c66e2823a7c5878201795c6fbff2dec827a781765566b984a31166d7fa33ea15d00edf28265578b1ca377033dc6a00b10f97b1316f5486037ed3ad114dd39f0810329780b02eecd7349096bee0970e4602ae047e53e6927b4d031d48070e58c6680a03a05cf5390803fd5b754d23d0b2dc3d50af8285bae21dfe2487b41ecbe56289113fa7b645fadd19", @typed={0xc, 0x75, 0x0, 0x0, @u64=0x2}]}]}, 0x1450}, {0x0}], 0x2, &(0x7f0000000d00)=ANY=[@ANYBLOB=' \x00\x00\x00\x00\x00\x00', @ANYRES32, @ANYRES32, @ANYBLOB="1c000000000000000100000002000000", @ANYRES32=r9, @ANYRES32=r10, @ANYRES32=0xee01, @ANYBLOB], 0x40}, 0x4000) (async) ioctl$SIOCAX25DELUID(0xffffffffffffffff, 0x89e2, &(0x7f00000000c0)={0x3, @netrom={0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0x0, 0x0}, r10}) (async) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000540)={&(0x7f0000000140)={0x10, 0x0, 0x0, 0x80}, 0xc, &(0x7f0000000480)={&(0x7f0000000240)=ANY=[@ANYBLOB="800000001a0000012bbd7000fcdbdf250a800001fc04fd000001000022", @ANYRES32=r10, @ANYBLOB="14000500fe8000000000000000000000000000bb730000000000000007000600836b00000c00090080003040", @ANYRES32=0x0, @ANYBLOB="0800100002000000"], 0x80}, 0x1, 0x0, 0x0, 0x20040800}, 0x4000010) (async) r11 = getgid() sendmsg$unix(0xffffffffffffffff, &(0x7f0000000580)={&(0x7f0000000100)=@abs={0x1, 0x0, 0x4e24}, 0x6e, &(0x7f0000000400)=[{&(0x7f0000000180)="94942c3d1e007dfb8404de29a8697799b1f5d6823a70813d4cc3415c6f862e8ceaac7242aef16f9f7c571f15aaacea204d20b49c43182fe1dd3de88c4a06101fc1f8d6139579492cca024fe7db0bd605ad17f17bfaab7d62fb0b847e05f9c41fbfaf79a513efae1ba322990f1327d42eabce0b83ee4fb2b875a3c4f9a1b2", 0x7e}, {&(0x7f0000000200)="993ccb04b5af9377cad757d9dbbe8345526644635ab0ecc50c5c9b41303e1e1f5b1f6161ff3f0a61f3f51dcf5eab537b55b5db80ddea43032815b7908ef405941077ae8e58627fe7265438edb56ef1b6918735c74b3b8fb318d24c30d06cd07d15f385dfd52cd11a49d23837a38ef8284140bcc827accc91e3fb964378ab5da48352949a0f4b27797b96b083028f2f6bdb579e6c1ea1809c644b8e841bb7bc0eb312d29e9fea73a71744649b830f244576a3b1b8f50150c6379a7ada43987439be4e1258efbf5d325ee5f0ad6c9d909bd73a187d299cd9d782beb7a8b2524cf2b61d2dba7e4acf6764b73c9a034907cdd5b7f547", 0xf4}, {&(0x7f0000000300)="130127c0749a951379b88d7ac86bd00a069d3e5793db16848cac09380ca3c6045e088493f74bbdd96015c04cc03eae1802359cf0a739df19bbbc910c3256b1724713e6e5c4be6c2fd26afc35a60e33dc091785fd017c569eea7264d1416c4ee26bc35c2a3ee4c8f285c9da4f7d78ed6613140dfff54f048b51827b8380edffcbbc154571185532f83a58dcf55a3657ebb73d8a261228568bf32c1e5ed7a414a2f8b30a24d5b952ae26d33311c4d23fa6db921a7464444692273f8476e09803bb860b51baeabc34a7828152bba3533e16df0300294a425b07d1a38b122b7ca71b5dfb620963", 0xe5}, {&(0x7f0000000080)="24c7145919fb2421ddb6ab6620204c0441c838579e3bbb9693a8c127c2c88f33b33f1766e6b65d233d3216d16c5ebbe3342d", 0x32}], 0x4, &(0x7f00000004c0)=[@rights={{0x18, 0x1, 0x1, 
[0xffffffffffffffff, 0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x30, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, r10, r11}}}], 0xa8, 0x40}, 0x20008840) (async) r12 = bpf$PROG_LOAD(0x5, &(0x7f0000000280)={0x6, 0x4, &(0x7f0000004180)=ANY=[@ANYBLOB="18020000e2ffffff00000000000000c685000000360000009500001800000000922ae83713ab9600010000801b10fb54a8cb72d232ad558c46fff4208d4990ec11ce9413ac30e00bd0081f8504e19a5183d769676520e98a263345e44d5ad12bca35510100c4d86abeb12303ff1c9fe0d0020000d60400000007d3670000008aff66d6b3181ffc1d62a3954c1198bbc4fa13aee48ca9e8969faebf3183fe803ab3f5024b52dc265b36fc9dae00a09404f01f9504d0976d252bd8d24538556e5e57bee3b8cf464ef3c6a7def8bad3ca6e3abdb21696e340bb8e2a093add57196b40def3858ef569147fa4108328392d322ab5df10a2f69a6bdf72ee7944e810d0223917c3d042410f57466f59544047d6d8ac44060000000000ee16c729300d2301800000000000002b5a8b05fcc154ad5290a8cdb97c343f454ff69dd6cbde49b28a6cb5f4fc0001745cff6e00e7ffffff0000acf3209a08439f1ff01779b6f6df7e02aa6d7760525b595fe1f697bc114ed1778e97a3f0395f946974cfb458be2a34cf924dc37b5592bf17956f3547497aba814382ff67b345b677a9d6523d87008000000400000000003fe8613ca29ff92be0d8deffff7b68136b0046d535dd39c0f35408869e9b342b953f91447e6b9eab304f134306320600a44095254b45a6c1312a13696c7202df5f764713504facc532c5a6d44d99ec7530ed7b0311000000000000e54e9072a22d911f4a2c2e2fa806e63c5cd98a8569a6d6bcfb000064885117e2ad910eae67e0ebe380d0f648713e68153579e02d71c58d147b00821ab9a6475b31e1ebf1369a04000000fbf3983f283f2f00000000992774814d63c933912d000006000000a66acb0a38856929e7d8b1b06c9bd5d7e5490f3b8596b694ea9483bd4bd287c83dd998a74694d18bdd8ad0983bc90770bbd26a82b9d99d5fc04563b523c47ef8c33400e90d02000000000000000edf1147a7afe772cd45af8aeffe2753088e02ca6bb2feec446ce7dbce66f0a93a03371320980865c7c62ea4d8f8a864dce9fa85aeb0454349100296ee2dba39c3f6fd6cf96714e11fe03b5062809a7418b165dd0336d226bac1e1223be1c97b15175d0e664beb126000e96549e1a1228c686edb475b705eaa9515c96f4fc6b3c925ea404e0f1de61026dc6c6618580fd6ce9eac602c1756f6d1056712412131ed9925989e01eae489ec7052e0ed72c326c7a8aa63999e2297c54ce1822d14b7c7699a9d0600f11f2e7f474cffbc35bc8623cd5eb68af82275a940be0400000000000000bcc3fbe7d90de96d6a8e9f32f18d1f606b381e4903b500000000000000000000004a2357ba5f6000de1cfa88b7165dcf4f2aaee86d4802000000000000008fdb686d5da2a42e4b5024b6535811f362201d4f82012e6af704973d04ea923c19e6cb723c1923b3eea2d73e176dff383c9fbbac53dfdcb1a68c98e96fe39eec23963faf3ebed3409144c7c53d6318ced678a621450a9b01e9f2772e5f2999d3435da02556e36c3215d2bd4e96c93bff3ad04a82ff3cfadcf65eb92adc6c68d66b11cb2d7556414a86dfa94bb7aa52c7febb1e9b2efcbbc5bccf9d39bed802f4f056976a9a362ee9cc624ec454b90200fd9603f96908bddc14500000000000000000000000000044d917c62b27679913075731e8fddb07c10c82002d60181588ae63a440454287de9e340f611267f37bdd0f2d21cb06fcaf45a0a297e396f428d43371424b307eef82c5d6d19f3ef0d3b8f7fa51957e3099caab31133b34a1d3eebc0f0c9056df2e9667ba0b55695c7894010079b07e7aef7785e2486472b5cba1f3346c1e8e23deb8c82bb6eb2c72c484241dc3b66da78260f800fffd39368b952f6f4a10295c50c887a31d8b543c5d10f2dbd4d0b84eaad43feb6e169a9f2fcff7000000000000000000e011bc6366f56fa787f212c1f8c0f47f50b1e9b5d841ea55fe569bb7bf1e78191c8a02ad436725771738a2a98891971e3b932352896e1ea10f62e8ef7a87e16151b39d6c27575714540d8c293a3fa4b5a825360423c1cbc8b5d19167152823ed853140edda002c16c842b168bb55f6bb713deb57d0aa78d6d4e5fc5be2c402bd246128f41bcb02000000892b135a92e8c84
4938aa98ba4839a1408a696454d40e5eed4d4dce481ca86bfac54c330331b7f2cde17cbaeb0377696faf546ecbe742d73d47d726a50f6e752f3325255bd7e8b5923aa3cfb6f7e06494f21ca450139c558000000000000000000000800000000000000000075aa0000000000000000000000005560bd9eb81e839e4992e64b074a66cccccf00334fa94da8477be7d99b558ec6a5b1596ac1e7617c6b32eed0cc70286caf2c5189a103f4b0b04aff171c4d388ccf67fea37e782f025c94c853cde330a193a967d907a8c88fcb033e680f559a72150cb900bafcd536f48797915a2fe9922ce27300009e1b36aa4730117d9b00000000003c630000000000008fbbd11b015c415ca04192fbfb1a8b0e3460af35771dbac10062835c9bab3ad09f7a022c52d8000000000000000000004000000000000000000000000000000000000000000400000000000000000000000000006ec473c54399b7b8aa1ee46132fc45da8292631178cecf19550108b8b8423de42957ffe9bb6d752e68d2bc2ce777a17bf4dfdfee5de0f3e4dadf51ab9562827b762fa611ba5f32861c19dffe1dc9fd5c41cd46cf131fd6b0c2ddad90ac33f768f9ecc70327c59918fa5a249befe98262f53c8182d95f6da3698a6a88c2c31d801a8f1f5e0ce05138d5422da0a6a62b9dfe1f39775d1d0c9186096415f544aaf76b0a1c877a6c826a5adcfb22c4a0e5a46271caa3eaf4f389dd5f3c20dbddc0377a4266d7b9fd61b9287e9b4be0a413ee31be0ddecab0ef7b25cba1fb3654ddf291ecb7768ac1e177042cb4c452fa6b3966950000000000000000c187da23d6855500fe8510b51e13a890e394b84a6ea2cc8d42b97c697c29122298d55e2e1cca8e07abda2606a3f381c64b9fec0000000a7965e4854e8e3572ad5149b3872342dea9252132860c9af1bd5fe263c0313dea5d6e0c11a466d6892ed65f34667dd79b07b5cbdd8aa7dd561a26b5562d4861a7e1b0f48930e0b696ea3bee7eb72794e163d7aeac9a0fa5403ac9cb421eae283b0550f1d0d339cd7b96e71d3ab48ad9d7975e0c9b117f71d3ab80a0c9b0284ecc469fa6181c9c71fce07a6ffb23296a107763138e8d9876291af2076890c47925ac773d95d2ca42acb3e5f3a1550665b898462c139ffd0106bc8a61b6117d252efcab7106b4c3a3c13a70ff452e9d2096142c517b0e91b5cf88332faca5b3ee96363065c3ce32d3d39ec36e20d597e05664f2526bd918090649da11f7299789d00f5024df1e99d3efecb9b457642fe810370ba4fbe00fa60a28af966a27a1659e448bbe43a1dcd2ea760018b57a36ac41ef2051a7b703d55c0602540663016e20d50385766df4dac47802a55bd38dd767ee9960c6daa704fc5d01a1459134d1b9edfde3be9e25a110228c64253588ff420644dbc0854e69a7bdda72f93ceaccf92cfe7dd6296c950db10f6dd8a5ef9b73cf6a12a1ba16fdc7e35b805f4fd2fcff0a623722149c1465e4de2d53f0f10b14c21865027abc71a12cb1e9f8029c7a20000000eeb0d53a83e518c8d2052c08b515d9d0bde24ac4e798040c7db0bb03c019507d6377f3d5dd94a27abc6d6b120d61f772407e0d2cb50d29168b68aef9f176b4c3aa8b21279d4ea9c1f669aa8c2c17d5b3a8d1dda58d26f1019af04b7774c85d5bce8be010f27c5211938031c3404680b01279c778bd1fe1b48c4b5b8e0fe756e54a8d76b7cec5e3407d93b4eadc446440607de844acf5524a4657e33af2115547b735b57b5092d0bc8fa6acb832509abe0882d570ce400aaebd7baff88526608d6991aac95751671174129457e4a03aca69d82b64b89e6ad6ed1e275ec5002e48170e4c7b4f3971481098dedb88fba90770e44bf404d5a97fefe2fe8e459fe45933b78c7ab5fe985a480193a20fb07da1455fb283df68af569ac82aa6dc703e29bf158931fb79f2abfa6ff7eb8c4f381c9da58bea460e2ead969933e5391970ca4fddd64da2e5df9c4d82044068caaaab771b37bb06bbe673056d849825525f1120b2250f6b8520381f7a74b1c687781cb6b23e67b918844b83dbaeeb559ec8520d710dd6d6b4e64838bd434a36ed03fc0c488b24571032ffbc9f8ce97041e1bc4729d539358dc9599c1266b9ce2cb6dd0ad57a6e9d3d4a11a27f70b2934c96237e2ba09c58eeda678d4d08b6da99b7a86e946215afb1b48792fde54492e306cb5342e2589874b603a1de972b1f09cc350096f5c3e814118af9ba0793cfdf20c77b34eacfdf63ce59ec4d2f867bf884e941559b068d908325667672b5e1cf71f4829c0493e8b141489ed926b822becead7a0a2b4a4c008ab16b616d60f347e4da54f06443507efe57ea62399ef4eb11b2f559e1b056456a53998bf1c6d13c92e75136147f91ae3a75ca15eb1b51bf700b3c0bf54bc3745ff313c5e75dc66386897f6ee45429371b8d0878c442ad2fe9baf85c1390da13efc353ccbef950c29f39ddf436f0d9bf1be1515ed251
d8b6f11ecb16b1e8d1ed04196e9b6c2f9e068b7749bb6c1f533e493f22c901662c65cb761dc2eeff2f698bd4dbae83e2dfdc4f1c7f918a00515c1bc189d10ec22b35c92725cbf0ba244fd029c4f026f68e000000060000ab0476c3fd7f7c1e5c000000000000000000000011e43e39d3f4394fbfa13c416b1c443c5e52eea726491ad75100ebad7c6d5a665c59a3fb158e43da904f19e7e8daa4e90390b8da945f6cd78536c0d2be07221f85ad46b180f256d4d84592691d15d65896b66b63a46705338b67b72dc1c3075fcdc5cbffb0366151632ba5be8ae815dfea9fadfd31c473a24a73d3e5116c3023b3563c72d26fbd59877132bde5ca4ef8d92fd3613c768b35223f6fd0b5e9a8b98cccf1e2b4612e620e3a159d6365c9045aaa826aa0ee6d26cf0397ce674c20824584b464ebdc2f3ea26a7aec4570b242a6677a4e9187f8591c3a9bdc0000000000"], &(0x7f0000000040)='GPL\x00', 0x4, 0x1076, &(0x7f0000000300)=""/4096, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x70) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000200)={r12, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, 0x48) (async) r13 = bpf$PROG_LOAD(0x5, &(0x7f0000000280)={0x6, 0x4, &(0x7f0000004180)=ANY=[@ANYBLOB="18020000e2ffffff00000000000000c685000000360000009500001800000000922ae83713ab9600010000801b10fb54a8cb72d232ad558c46fff4208d4990ec11ce9413ac30e00bd0081f8504e19a5183d769676520e98a263345e44d5ad12bca35510100c4d86abeb12303ff1c9fe0d0020000d60400000007d3670000008aff66d6b3181ffc1d62a3954c1198bbc4fa13aee48ca9e8969faebf3183fe803ab3f5024b52dc265b36fc9dae00a09404f01f9504d0976d252bd8d24538556e5e57bee3b8cf464ef3c6a7def8bad3ca6e3abdb21696e340bb8e2a093add57196b40def3858ef569147fa4108328392d322ab5df10a2f69a6bdf72ee7944e810d0223917c3d042410f57466f59544047d6d8ac44060000000000ee16c729300d2301800000000000002b5a8b05fcc154ad5290a8cdb97c343f454ff69dd6cbde49b28a6cb5f4fc0001745cff6e00e7ffffff0000acf3209a08439f1ff01779b6f6df7e02aa6d7760525b595fe1f697bc114ed1778e97a3f0395f946974cfb458be2a34cf924dc37b5592bf17956f3547497aba814382ff67b345b677a9d6523d87008000000400000000003fe8613ca29ff92be0d8deffff7b68136b0046d535dd39c0f35408869e9b342b953f91447e6b9eab304f134306320600a44095254b45a6c1312a13696c7202df5f764713504facc532c5a6d44d99ec7530ed7b0311000000000000e54e9072a22d911f4a2c2e2fa806e63c5cd98a8569a6d6bcfb000064885117e2ad910eae67e0ebe380d0f648713e68153579e02d71c58d147b00821ab9a6475b31e1ebf1369a04000000fbf3983f283f2f00000000992774814d63c933912d000006000000a66acb0a38856929e7d8b1b06c9bd5d7e5490f3b8596b694ea9483bd4bd287c83dd998a74694d18bdd8ad0983bc90770bbd26a82b9d99d5fc04563b523c47ef8c33400e90d02000000000000000edf1147a7afe772cd45af8aeffe2753088e02ca6bb2feec446ce7dbce66f0a93a03371320980865c7c62ea4d8f8a864dce9fa85aeb0454349100296ee2dba39c3f6fd6cf96714e11fe03b5062809a7418b165dd0336d226bac1e1223be1c97b15175d0e664beb126000e96549e1a1228c686edb475b705eaa9515c96f4fc6b3c925ea404e0f1de61026dc6c6618580fd6ce9eac602c1756f6d1056712412131ed9925989e01eae489ec7052e0ed72c326c7a8aa63999e2297c54ce1822d14b7c7699a9d0600f11f2e7f474cffbc35bc8623cd5eb68af82275a940be0400000000000000bcc3fbe7d90de96d6a8e9f32f18d1f606b381e4903b500000000000000000000004a2357ba5f6000de1cfa88b7165dcf4f2aaee86d4802000000000000008fdb686d5da2a42e4b5024b6535811f362201d4f82012e6af704973d04ea923c19e6cb723c1923b3eea2d73e176dff383c9fbbac53dfdcb1a68c98e96fe39eec23963faf3ebed3409144c7c53d6318ced678a621450a9b01e9f2772e5f2999d3435da02556e36c3215d2bd4e96c93bff3ad04a82ff3cfadcf65eb92adc6c68d66b11cb2d7556414a86dfa94bb7aa52c7febb1e9b2efcbbc5bccf9d39bed802f4f056976a9a362ee9cc624ec454b90200fd9603f96908bddc14500000000000000000000000000044d917c62b27679913075731e8fddb07c10c82002d60181588ae63a440454287de9e340f611267f37bdd0f2d21cb06fcaf45a0a297e396f428d43371424b307eef82c5d6d19f3ef0d3b8f7fa5195
7e3099caab31133b34a1d3eebc0f0c9056df2e9667ba0b55695c7894010079b07e7aef7785e2486472b5cba1f3346c1e8e23deb8c82bb6eb2c72c484241dc3b66da78260f800fffd39368b952f6f4a10295c50c887a31d8b543c5d10f2dbd4d0b84eaad43feb6e169a9f2fcff7000000000000000000e011bc6366f56fa787f212c1f8c0f47f50b1e9b5d841ea55fe569bb7bf1e78191c8a02ad436725771738a2a98891971e3b932352896e1ea10f62e8ef7a87e16151b39d6c27575714540d8c293a3fa4b5a825360423c1cbc8b5d19167152823ed853140edda002c16c842b168bb55f6bb713deb57d0aa78d6d4e5fc5be2c402bd246128f41bcb02000000892b135a92e8c844938aa98ba4839a1408a696454d40e5eed4d4dce481ca86bfac54c330331b7f2cde17cbaeb0377696faf546ecbe742d73d47d726a50f6e752f3325255bd7e8b5923aa3cfb6f7e06494f21ca450139c558000000000000000000000800000000000000000075aa0000000000000000000000005560bd9eb81e839e4992e64b074a66cccccf00334fa94da8477be7d99b558ec6a5b1596ac1e7617c6b32eed0cc70286caf2c5189a103f4b0b04aff171c4d388ccf67fea37e782f025c94c853cde330a193a967d907a8c88fcb033e680f559a72150cb900bafcd536f48797915a2fe9922ce27300009e1b36aa4730117d9b00000000003c630000000000008fbbd11b015c415ca04192fbfb1a8b0e3460af35771dbac10062835c9bab3ad09f7a022c52d8000000000000000000004000000000000000000000000000000000000000000400000000000000000000000000006ec473c54399b7b8aa1ee46132fc45da8292631178cecf19550108b8b8423de42957ffe9bb6d752e68d2bc2ce777a17bf4dfdfee5de0f3e4dadf51ab9562827b762fa611ba5f32861c19dffe1dc9fd5c41cd46cf131fd6b0c2ddad90ac33f768f9ecc70327c59918fa5a249befe98262f53c8182d95f6da3698a6a88c2c31d801a8f1f5e0ce05138d5422da0a6a62b9dfe1f39775d1d0c9186096415f544aaf76b0a1c877a6c826a5adcfb22c4a0e5a46271caa3eaf4f389dd5f3c20dbddc0377a4266d7b9fd61b9287e9b4be0a413ee31be0ddecab0ef7b25cba1fb3654ddf291ecb7768ac1e177042cb4c452fa6b3966950000000000000000c187da23d6855500fe8510b51e13a890e394b84a6ea2cc8d42b97c697c29122298d55e2e1cca8e07abda2606a3f381c64b9fec0000000a7965e4854e8e3572ad5149b3872342dea9252132860c9af1bd5fe263c0313dea5d6e0c11a466d6892ed65f34667dd79b07b5cbdd8aa7dd561a26b5562d4861a7e1b0f48930e0b696ea3bee7eb72794e163d7aeac9a0fa5403ac9cb421eae283b0550f1d0d339cd7b96e71d3ab48ad9d7975e0c9b117f71d3ab80a0c9b0284ecc469fa6181c9c71fce07a6ffb23296a107763138e8d9876291af2076890c47925ac773d95d2ca42acb3e5f3a1550665b898462c139ffd0106bc8a61b6117d252efcab7106b4c3a3c13a70ff452e9d2096142c517b0e91b5cf88332faca5b3ee96363065c3ce32d3d39ec36e20d597e05664f2526bd918090649da11f7299789d00f5024df1e99d3efecb9b457642fe810370ba4fbe00fa60a28af966a27a1659e448bbe43a1dcd2ea760018b57a36ac41ef2051a7b703d55c0602540663016e20d50385766df4dac47802a55bd38dd767ee9960c6daa704fc5d01a1459134d1b9edfde3be9e25a110228c64253588ff420644dbc0854e69a7bdda72f93ceaccf92cfe7dd6296c950db10f6dd8a5ef9b73cf6a12a1ba16fdc7e35b805f4fd2fcff0a623722149c1465e4de2d53f0f10b14c21865027abc71a12cb1e9f8029c7a20000000eeb0d53a83e518c8d2052c08b515d9d0bde24ac4e798040c7db0bb03c019507d6377f3d5dd94a27abc6d6b120d61f772407e0d2cb50d29168b68aef9f176b4c3aa8b21279d4ea9c1f669aa8c2c17d5b3a8d1dda58d26f1019af04b7774c85d5bce8be010f27c5211938031c3404680b01279c778bd1fe1b48c4b5b8e0fe756e54a8d76b7cec5e3407d93b4eadc446440607de844acf5524a4657e33af2115547b735b57b5092d0bc8fa6acb832509abe0882d570ce400aaebd7baff88526608d6991aac95751671174129457e4a03aca69d82b64b89e6ad6ed1e275ec5002e48170e4c7b4f3971481098dedb88fba90770e44bf404d5a97fefe2fe8e459fe45933b78c7ab5fe985a480193a20fb07da1455fb283df68af569ac82aa6dc703e29bf158931fb79f2abfa6ff7eb8c4f381c9da58bea460e2ead969933e5391970ca4fddd64da2e5df9c4d82044068caaaab771b37bb06bbe673056d849825525f1120b2250f6b8520381f7a74b1c687781cb6b23e67b918844b83dbaeeb559ec8520d710dd6d6b4e64838bd434a36ed03fc0c488b24571032ffbc9f8ce97041e1bc4729d5393
58dc9599c1266b9ce2cb6dd0ad57a6e9d3d4a11a27f70b2934c96237e2ba09c58eeda678d4d08b6da99b7a86e946215afb1b48792fde54492e306cb5342e2589874b603a1de972b1f09cc350096f5c3e814118af9ba0793cfdf20c77b34eacfdf63ce59ec4d2f867bf884e941559b068d908325667672b5e1cf71f4829c0493e8b141489ed926b822becead7a0a2b4a4c008ab16b616d60f347e4da54f06443507efe57ea62399ef4eb11b2f559e1b056456a53998bf1c6d13c92e75136147f91ae3a75ca15eb1b51bf700b3c0bf54bc3745ff313c5e75dc66386897f6ee45429371b8d0878c442ad2fe9baf85c1390da13efc353ccbef950c29f39ddf436f0d9bf1be1515ed251d8b6f11ecb16b1e8d1ed04196e9b6c2f9e068b7749bb6c1f533e493f22c901662c65cb761dc2eeff2f698bd4dbae83e2dfdc4f1c7f918a00515c1bc189d10ec22b35c92725cbf0ba244fd029c4f026f68e000000060000ab0476c3fd7f7c1e5c000000000000000000000011e43e39d3f4394fbfa13c416b1c443c5e52eea726491ad75100ebad7c6d5a665c59a3fb158e43da904f19e7e8daa4e90390b8da945f6cd78536c0d2be07221f85ad46b180f256d4d84592691d15d65896b66b63a46705338b67b72dc1c3075fcdc5cbffb0366151632ba5be8ae815dfea9fadfd31c473a24a73d3e5116c3023b3563c72d26fbd59877132bde5ca4ef8d92fd3613c768b35223f6fd0b5e9a8b98cccf1e2b4612e620e3a159d6365c9045aaa826aa0ee6d26cf0397ce674c20824584b464ebdc2f3ea26a7aec4570b242a6677a4e9187f8591c3a9bdc0000000000"], &(0x7f0000000040)='GPL\x00', 0x4, 0x1076, &(0x7f0000000300)=""/4096, 0x0, 0x0, '\x00', 0x0, 0x0, 0xffffffffffffffff, 0x8, 0x0, 0x0, 0x10, 0x0}, 0x70) bpf$BPF_PROG_TEST_RUN(0xa, &(0x7f0000000200)={r13, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x2}, 0x48) (async) r14 = socket$inet_tcp(0x2, 0x1, 0x0) r15 = openat$tun(0xffffffffffffff9c, &(0x7f0000001f40), 0x226e80, 0x0) sendmmsg$unix(r6, &(0x7f0000002000)=[{{&(0x7f00000006c0)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f00000009c0)=[{&(0x7f0000000740)="0b8ab99004e3f31defdc48d3025abdedf7e1436310ef51cf44ce8b14ed458eee091eb0bfa9ec6f0530606ae03baab7085baef5fd1ce24e4479139dc4dbf086dca9fb731ce0385a7d9bd14d72006b8e72005e1e71f41dbd53335c37b2ecbe7941cfdc3168585a4057ee4cbebebd0b2af6fe0f5129031279c764d5097dfffc950cd7ee68834359b30362010e88f39689a19c66eeca0764ca6ef31cc6cf5870f9995f9eff9e07879fa9966431854517a5812b3860e1320552927cce5a5cf4bae4b92a99937e06ae1d5cfd71171d362cf2298d458b1c35eab25d61ef4f1355", 0xdd}, {&(0x7f0000000840)}, {&(0x7f0000000880)="c5d23dc36fe0e908e0409c995a363d052e8fcb37f0a7e11440e469571a44f62eedee9bc74f2a9e8f59bd9451dc2ccfcad04beae22ddc912d0ebf78d2124f1d3684a18002288e5b23fed6c08d8e", 0x4d}, {&(0x7f0000000900)="c66b6d33a8857c4f93fe523462d8782c37954f8bfca42bfb0e73762788d9bcaf89401118b776dcf3cb8e876a8fe7adcd3757e11d41e7aa", 0x37}, {&(0x7f0000000940)="c3ef119ec808f72ec4094b679fbf1aaf2cf38e8b39cd24a43f3c7fb2c8f302cbaf342da1680f6165a3704b47ec0e6e18b3e48aa2225c7b606b86e4c6d0cfb77c8e5f41242f7d0af8d8bb4d53c7588e4690332e2727fac37a7120595946460de696bd55193672d1", 0x67}], 0x5, &(0x7f0000000d80)=[@cred={{0x1c}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xffffffffffffffff}}}, @cred={{0x1c}}, @rights={{0x38, 0x1, 0x1, [r1, r6, 0xffffffffffffffff, r6, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r5, r2, r4]}}], 0x98, 0x4}}, {{&(0x7f0000000e40)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000000fc0)=[{&(0x7f0000000ec0)="42c9c677a79354bd03351dc160ca532fc656c65e8c3fc6f9161ef0a747a325aef705cebd83fc08e9d9217d673d71e27dfefb27f27c24c200297385b42d21391719ae3de89331cf25463aca3dca69db5722b556b8d830e2f86dfaa65c22112e8d34251ff3bd47d72b491c190ef2d35571d9ea7613a3585a49b4d61abe3553d69bc7ea515f3ef78ea2256e3abc70ecb82162cfddafbf8a49e84c8ee0647c67465edf50e9cb18e09d6d2eead989a72e68c8382ad18e3ca115f01e3debd9d2d0429f275c75", 0xc3}], 0x1, 
&(0x7f00000010c0)=ANY=[@ANYBLOB="180000000000000100000000000000", @ANYRES32=r4, @ANYRES32=r3, @ANYBLOB="2c000000000000000100000001000000", @ANYRES32, @ANYRES32, @ANYRES32=r6, @ANYRES32, @ANYRES32, @ANYRES32=r5, @ANYRES32=r2, @ANYBLOB="000000001c000000000000000100000002000000", @ANYRES32=0x0, @ANYRES32=0xee00, @ANYRES32=0x0, @ANYBLOB='\x00\x00\x00\x00'], 0x68, 0x41}}, {{&(0x7f0000001140)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f0000001200)=[{&(0x7f00000011c0)="328e8616730e94d95ec24b9c6f8873e178bfada5b3e6c050c3d8ab2137bd5e89358b1d193b640f72687e499af2d4c7e95b066e", 0x33}], 0x1, 0x0, 0x0, 0x20008000}}, {{&(0x7f0000001240)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001540)=[{&(0x7f00000012c0)="3722910fade2a52d513917386f047c684997907911e8504d352e3220d07a2afc768b", 0x22}, {&(0x7f0000001300)="90b1e1c1f7c02e84210cc0f3866836ab070f6333119a4ab800d216a17f448ef4bf99deeb7271cb1780b8014c3fde40f2d14a07bd146b32b6d9a0a072913beedc22f5de01dbd949771ac9525b71d4379d848c1990dd758c74", 0x58}, {&(0x7f0000001380)="c144ba1bb64838e8891517e307fe3bbde227c5172dd75585cae80b1fd6a3c84cf520513f2e2ec703e64683782d98c9d5f58e6c2a4b5f5a92cf7aae65462093b29b09f70cd08153a7d78ae7c5aa9cef48aa8a4315eda93fe4d458c94038b9fb2f279b9401d404f4cdc8b32e128aa4088afb27976308a4ebe5fba77aa08d13eba68027ce1279119b13fcfd8aae88a348", 0x8f}, {&(0x7f0000001440)="c565ee8b3a54b1d16810dfa319503e24b9bbbc78c23d66c12153e4df62d8bc649b4d83252d4481ec5da2fead7befd8e4bf339e22f768ceefdcdaef63da61fd2e0d2e05a918d2ef89726ff760bab4ad3686bc4a267b1078dfdf2f81bc6df3d2d4c6a5d7999c4d94c478a6257d823ad13b7d6a9c474a44aae5b9c8a6e2c6b31e91af0b379e82856d4ac18c49c2580e99221b868fa0404e15893353a8093972dc90beb899b5bd6f70838968a841cf1d5491ed8be7c863", 0xb5}, {&(0x7f0000001500)='?', 0x1}], 0x5, &(0x7f0000001800)=[@rights={{0x28, 0x1, 0x1, [r3, 0xffffffffffffffff, r5, r4, r3, 0xffffffffffffffff]}}, @rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r4, r5]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff]}}], 0x118, 0x8000}}, {{&(0x7f0000001940)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001e40)=[{&(0x7f00000019c0)="e9e32c41e4161ac80cf71fc8619599e7e3d7cf606196aa75158c56c97f1244b4683f934607e09d391d23148500dbe32cf66c3bc3b3fe4f8ed5e8c2b7c0a735d8ae508fd66c7a7014604208ba43277a4ca6a73dcfb845d2a4929284259e4be16a3674b05f189a36616cd1c534f3bcc344006a514f1627465f8e4487e6416402d4", 0x80}, {&(0x7f0000001a40)="f29f50bd9a9d9d77a0c84ff27e307ae4b9441b8928b2a4ecea8559bed6289e0bec18ac42056f773607516328b869d6e315e7d0911b3041d57d8d54868b92de98d9dd133953d90e4f2b37a923e3524c505a605f57e9d3239ebd594b3d8c7c3def9ecd7977d8f7a77d95b37239cfca0fbb437cbd6c23ef368978c89d260500211ac9af1ed37e18388a33f76a4c0b3f92a60df65555eaefcd06373ba1ce5dabbf0c8041", 0xa2}, {&(0x7f0000001b00)="939d80c0e28ee263596d7815f32a55119cd251c2bb70a3e29c0f4cdaf717ee454d34169b4368f944f06b995d2eb11ebdd2c57d23caa9d657d5cd666d93b5aa99", 0x40}, {&(0x7f0000001b40)="18de3d1a090c3248c3c138466b46d981f5961f9b44e631aa8e156483887bccd733d1cea050d28fd1447a8ef885d4886cc33dbb0fbe81267452e02cba175af212525be34eae59267002de2a11337cc21f2715b764d8f5a51788d8baf9ce81e54df913134bf123c0ddc26645bfabfe84d7c34f1b3d11a50646e9766a784c3bbc381f3bc17e7850a425ed259b28d06086e6c92eeba316cade95f350d26854220cea30696661edba53d1", 0xa8}, 
{&(0x7f0000001c00)="2eed2d06bc4a7c4cb6420577c3f7347ed4943868cfa5e038c5689196045379745d3a5799845c", 0x26}, {&(0x7f0000001c40)="a34d25b014da5df8c04869f6eabe60886f898ce27ef40b830744f11225290ce8e2281cad7d7348792ab5f2", 0x2b}, {&(0x7f0000001c80)="6391b92ea42ec8033e2bc025e0cd282b23f182489f7850a35f72648f5b5f82d0794b91b021f45e8d64f358713d8397db07a64ea062663ebfeba258c7dd42b0ebb807b566bef62d97b2d14da9de17b19e3c4d85230a86899d158b7e0441f613e3d8fb654346f52daf1e19881309cc8f5d15e7cf27c5b37c1323df5d497625a01342996e8b8abf8a0400922bf028a6bd79f998a6165c913d9a68a19847978d372d57a40cedadf47dc3e2ee9995f8897885eff2f3cc532dae78e3", 0xb9}, {&(0x7f0000001d40)="7d32f4f3a02f098e56d123568c67a8b7d72628f01dc541a58134d2ccf04c55e2d42f38cf4aecf5b308692cd14681e8f7d2e1bf88e4668890a355919dc8899b3fcf25099076182875cf2aa3d3c83368f7bc2ac97f88c13c783b256f20747fbe5ea6a0afb70d874bf0c1b56b7bf8710e37e01e1af2735ce8a1fdfcec767ba9408d87d9c7ffd44b96b93abf95efdda9751d44010d5ac10fdc83cd2c89e2424c5b5052f7f6eeb84559b76ccd48c1e381d25886368d327e1de296eed50b9232a79555ea3c9d5dfe66ba5ff90a2d0a0ba9b4e5b63a635b22011b9400dca0b7bf17ec1d037a02a0c7a8f2cd875e6f68ac9b1d5a67bea3ee913099b20435cc", 0xfb}], 0x8, &(0x7f0000001f80)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r11}}}, @rights={{0x20, 0x1, 0x1, [r12, r13, r14, r15]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}], 0x80, 0x24004090}}], 0x5, 0x40008804) (async, rerun: 32) sendmmsg$unix(0xffffffffffffffff, &(0x7f0000004000)=[{{0x0, 0x0, &(0x7f0000001540)=[{&(0x7f0000000100)="dbc4f24b18fce59e560c179fe1a0e7a051977ee0c3b246821ee70cc5b4874a0fd7b5f20706de65f57d085571d3dc21e249b5a0a316cf18dc611fcac9d6cf65fb73973e3cbfa874ad40e619990b0489585b6b1bc7ece2886b5332bb7543fd4fca01d56fca29c82e8d042e615c7414cbb846a2d769b81fcca44b7a9e2c57fa5cd63e871cdc526dc5f2ce1d46eeed22360d31aec0cc3b6cb321f1097d2eb11616c612f9abd3f9363c20503e8f0bdb622b4369e186ebf9592ab60a7548f727ca152eeb0e362d61c62630c1", 0xc9}, {&(0x7f0000000200)="9b5fc994668022390ede555253a754f67eb9585b161ca1030b9a4a0e374e25bed85a93013ed5ddd7fcce7bd5a58631904ae97eaaa80dbff37cd10cca63a3a6c296391d764f28fac76c13fcc8ff66c88e54c8be0c7f4704845653cd78fcc7bad2cd42daa117c886dc13fc8070a7ca47cd612167639d7e0481b0467cd46233dea07ba7afefef764f38deb3bdcfdb26cab03e340b66d93de2ceb463fc4a079b4256de036a307673c90ce0a65e54c6e5e43b15c6b98330d72031b0e9b034d308c7b278504d7826088114da1c1a8771d42454af2e4f8b814c23a3909a31cbcb4194f39a574a9332b096d2f195a0bb64c8f90d0baa107168130772ba501a34ade1", 0xfe}, {&(0x7f0000000300)="92591ffaa769533f6a3b214975b30d0de86745302fe06013dc26ed63edf67b4dbe25c9dead1070a285dc22a69e9717901cac58309aceb8f39fa017bba29ae63c2a8af5ca56c6e040e84ae7de73c42e26ecc4bea395798df3568de0d431606f0a03739c6feb8166fd355c68d86293aa9a78e696acee93f2df127893649983d97346fb3832cabc8453dfbfefcb5c0b6b63dd718ab4e1c16735c35114dcf2c6ac5026d45cfe581da580fae61a311360f7de281241e6687bd0ea459cdb5c1b56b71736a90238d07a3700c9c512549dd29d82dd4cc10f654540bd4fca5b6d8a6e2f583886aaf79c131085190585fd86b7d8f0ea214c68a27b", 0xf6}, 
{&(0x7f0000000400)="22a4e9c5246d22d7625613407cc09286f565af4b0e143b536898a61683bcc0045d093f8f63a12f43f598f4f0fb1ccdc7fd17f7e11ca892648538c38a8009a7dfa62c6035aa7279bb601539b786c8121d56aa8d049ac2b152597503008428a666190ceda8725f8ef0812b3ec6ce876c48a8e1882827df28635e8ab910ef8c01484394b7fe206c042e79246c8ac690c05ae71e1a952acec3f1f0a445f1bd395660a92392711fa1a58154c0b2aea92e7b328c517aa5811a740f3253a402dc73f94e73e3e2ece62d3ab9ca2a0a6d458ecc6ab643a6a4e8b7d87e9a47813d522b7d7d9d2846323e4e4531cd3c375e9e23f37a1fa581383aac63649e60e7d96bc87381630c2040dd95db5b1b97bf32ae5c66f974e98ace003a33dc5a922e34712daa43bbc86e76fe866589cb8fa1282b5ed8db8ccedd64e312cf7ecf76e517938cb6ff93c8598bd4b7ffddf7b7e9595c719f489a0af55943b51c892dc828412a40192d52048b494590550a72541d6d54c393e1886281a1b57f47f536b15a1f27e26392360ccf7ea4759d4e2c3d37c9819211dd6aea334e15b189b086aabf3a42b9306071a3c0b57c66f94de6bafb0cd1fd22a048889656a0458d693ff8562c3c9c8409126082db148b1de8aba23f2569daa955b64b45769680a6204f77189d5f24a044fe1bdc297d5bfed4b7cc113ec1a50a3c6c6884519ea12cb6bc811df9c5d3e6458efb9747e48bd238c19cb8057febc3599c94208138bcbeed9c01003a0298a103d5d004e0851870dddf9363a86b71f670550f6641148164b24adbf4e668e8ada237fa594b943c4a5529c613f82719a5bc84914f1063c5a121296059641af891c2ec84cf1ac6658c7fe04108a502bcc70e93014f9a5e81e4bda3196aa02915c8f0ebc8f1e442e5c33d8e2aa2563bf4a961aa2dedeb2636da2286efc7b8d32971b80aca20ddc08e6de9c6c062450355d7860affa75a772db952c3eabb7f3578643bac353108b52e350f957dbe25b3a8e52cea9a89aaba3def7c7365b22fb16a22ece96d4a65e39aaf2045bb01de2d207e886219b006f8414cef79fdbecf781706b45fffdaa3f2442280776e2fc7284106739609a11862589a058c659f95797839363c7e8006af72758917a76e45c8cf0358467b74ed985e960b2460d0b5d3e8ef94989c42a4905ab231ee823993095c62cc48b93293f8f8a0e9b611a07ec19a9a08c8a5d12ca63e20bcb9e89307c081f105ab7752d054fa41e7120e7679d3acff82d702784e20c30aaf4d444195fe02f054bfc82cd4f7116493b4734136e2f1f33c2a03b8c1a32513d1da6f49242a72710cc200246393dd189bb2ee028c861c82298a9649ecebcc382ba6e0580f42c85aed9d2dd7e788fd246bc0ef9ce607173b1b39b9192d20588b1bb9aa2f34c1c3488dd61cbc5f176c4b36ca04f644985d80f02c22ce2d6df18e33aa740990cd9ab84f3df6557ae980ae6278ce9b6b63ac4a18d319e5b5a4804146a7cb8b086f22cef08281f517875592c5003044ce6a6913017323a5f24997334ed8359f925bde255bfc1deee146bc116eb20c7cff66f0a2f1c8df00171fcb3062866e17206a84f4369de92709e8642cbdbeb923969f6284c7d6af58d8e9c5d7207a75b7e1ebca1c55923d438dcf8575558b422baa7eaca5284f1814c11042678d5ac76c684f88ad78cd79141b8744473d3233d4631cbb355e90921aebc8205cf8ddbb9404494d2b152bda30a76ecf8adc44118181240bb945b58e1219cc4c3bb3956246d8dab179e64826a54064fa7b110a8063025d327b5357f6fa7cee845611f62d4af9996fe48bf0d37d455b2744521e66f33e0832d564846eff0ba91e9043e037f34f1448d934b35cc39ff293fcb4ab5afb3c72920fb85dcce7d8b13676d780020f72eee9f2751a242a9e339686ac4b960517a96293845ad7c1e125693c88dde2d3a4406c6338fb69f86370b9d0bcc725039099a4ba30b51df0d67b5dee44642dffeb65a2bbcb434b9df0377fd0fc814a2b8bcada5eb4f2d5e2d3e782b663d7c4d53506e078ede182ac0cb9b12b1d9ffa462d03e990761e9fa6ba5aa7010025c63a95e40ec0e0d5e3636ee50a076e07533567b049d00218a328516b797fd82185a780841560780ee262092b30dfef26d79928caceeea3e900e8486678cc558154a58d75ce3e3e537c5a8d5906200900f8bad0151c996e2139c068129352eb3276763606631f72925c568709ce8a50327c1dfb66506312eecd8ae3f87eafb55fdb8fbc00ba1fe6d5192bfe44fd6c40dc7646a8d06fda1e98e952988fc2c930215427d2b162b1d17bb051aa61d3cbb556784e36518a853751e01f9de3b1007888522d73b367943cb2aa34772c0e22ef74a9fb6a5578a42feee83de6886acea93120dc876aef365bd1e0b62990f8f0697053c43c26a9d241cde299cbd8ea13b18cb61bb0b1f8202d6045a98e1f8665c0b9485cdaf769d596902dbc4c34bd2b8
1e4be342031cc9f62405b30f8d6e1ce0a1a4c2f252c293eaf9022a3734b92be39b79340e8b07747bc1dfd1c6f8dadb5fe8855b7c149ab2dcfd06d6722adb6fffdc494fd031d4032c44851e02e389d7097fdb4f89b41bfa8d501a57522d3448120b37855f8addf36ab39930deede738fb936c2ddecef7c0e77dccf9619839b0f636e2670e017b4facaf2152eadbfd414e8971593300487c736407abbfbc9f116abec4ac7637f2bbf399415801230df66acd2838f9f05c21ff2089a82e6aea2e5a3f1841cea5348eebf531987717c0f171940373786e9fd74913292c1dc92cd81ce4c43f705a989e2cdabc5450a85a8a6dbd68f47b4546c28d54b501c25ec0dcbcaedb934c27db33418e12e1ef4cc05b9accc104afefb90819237222f187a8c45a5cff0823606a5161f323d2f5d2477af5e33fec044d241ee655cc3b877178eaddbc4c3ea9ccf0a8c02b333dc4d2db4240750f9fcad4dee434a6871a24ca7b875911da5347cb1a238b6b864afa053b334a8f84ca11fc69c8320ac21dc7366aac8110ddb5053bb2eba61f3365df020a3a40f5c28f785fcb1231b03b27e414112bf475e0d4b9a9324fa5a929602c4ca5342492809462eb3e33ead061ef97890df95c5876140905a25a8f50c3499c7e41be3c21a382cec592d6786a8a5b11633463f396d6709e9e89bceef42843d23b05d82d8479a7949902fc133aecb6228bc85202f2e0403fa1be9a8db8080c849330ffd4073e9238a3b21e223011f14a65e9aaeebcda997b4ee32eb5372ee4bc0106bf5b5f8942ca8e0d9e7e301cb08074738293b144eaf723cf26f4fab0652d7fe7c4f6723219ee205a4a01c2f7080100b583609c685192d7aed5083a69969782d76c38fdd4dcb61a6ef51c9d95bd7fb3212ecec3b383552970e58a11dc99169201ca95167c59d99fee8fd8bce5b590ba620a168a3592c15c62f756b6de4e1b7deb1a9bcfc57a88d5a3cf9fbff07b9d6094b74f6c27d01f30eea8e7741130dd7ec2099cc87654e2f0c689c85c5a24e1c63f37e51e98a1844a153f8599cd75371ea482ed74e01970e71b0d10e6afd7f0615c7086f220ffef9744f6856492fdd93082232f3b3bea309bc3bdbb175026777eb19b3eec490d477d329e0353c889191b576b3ee22042f81b162c41de634cbdd8d913942f8f099d532c2a792a947d68eb0ee13ffcb5b0304de64c8bba0c72b3ee5c9473d2058e41b963abc824b4e644c15c5040219108415bdfb7f88a6d5b1e305721b927f5f84d106bc0f830609b9fc72e27020eafc7c07948673b564d640a8f8cdf76d1452626ca38aebb1bfaba986c0cb28f162a0077a030a79dedc1030896f5342c76130d3593092f2b25faea80c893df46c42649dc88f920a3b991d877481b531a7a8d8b90a5dc7ca90bc22936ec00702da7ae01015f5c46efb0b1a189483aa908de49a9b2b326b0a63a46cc24a5a58113179683eb1dcb2fd6f7542a76d739de7b955f34efe2e6530e7e50eca4b92cfdbcd26f17d2da53ab089dbee9ec4d7f5af21552cee181c805206a123587b3b106a81dc72392904f3f6555d0b709214d11d4104cf551eed8740483c69105f4ee209bb2644e21f3d785dbea25ff4c3e6c3b0d1abb1c47247a94ef276f196c14ffb271427ab0764abeea0104f0f1000349b1e856f9a6eb8fa04d51ebbb6ae3e2547351a8fb335671144a71a223fbbb4dff6d4ee767bcd8167548d1f7746895eff32e320142c927a48815bcc0d37aadb1f752a30a477b734d6e83327042a87a7902a06996d4571b7f35b5398ec162d398f6170a634a6f5d7446b30dce0edd2f7452d668ce48c28f5672c67096b5e43cc96ce4ed4af7a5c4c9b6b293f838219948e0c6a6f1fa4235b67cbeaf589332b2d3f1021c5573f361e809d934c602e5f890d06ccfe22ed3a5fc8ed8ebc0421296e43447f8c91da6bad796d4a3f843b9e78b446c5d1ffa8d8125ac838b64ba3e0fc7801d1ce4bfabbb5491eae994633f0aa3a99b2d3d6db3276d40bc6352742cf2353893d2004c081392652804958259dc1ee479beae1729e4d1dc3bf75e6e6f593076c84557374cecba1cec7f269868bd17ae3f9edfefa918b7de6dc33f5282a8ed11bc5628098fe9936570561d5c7331cbd9095152c217f4bba4a9d98fe45d27f7587981bebcfb21542de0c1244959dee05b753d2463b20f9e1be490dec1176414a88f4c8bdb8e658875adec7b8136bcc0a8a3b90384c43ebdea5a6c25c0af79b49bfb41247d8af8023315ca6181da2ec3542eb9ba567552ce3ef999f6778c2e2349d93b198bd196ccbd2513328c66b84c65475f8f5cf8c486076dcbf812ca812169b001ac54ea607778aaf1ec516ef72cc54cef8c0c82861f0faa78a87e5f8c8a0eabb07ffc5f44ff6261167587fbd0b33be0a1a923eea862d1ca8de55c8b8b2920c899fb937908bd45d0bfbdef74543761127ab9a55940c43fe8c6a525d4a72952bb5a49fc36dd834ccd953258bea798af82fec0e
87fca987d7685570e6c768b8dc1504af9bd97a4f9069594fac84f8c7aaa228ce2037c459eaf92f51004a7ce4d9a6ca4e14bc874d6eddc053756f75742da808f54f322edae326ecfe62ee8a4234bbe008ef801ad411cdabede3b287c64f582157b890a2db76ca49bc4657324ea6e91f419b3f9a9814cb798d396f080f9a29783dc0b2809832dcbc916b92015214282473511b28b389b560429bc0361e800a1fbfe8a90ed362803952f1442f83ed192637f6e7a8fe834608b8e66e05ce6de8ddf53a25bd3d85e3158982ed014fb24a9bf5feec63f3775b2d19f4406cf857de88479249ea540d14579fbff3be4447df73a6560bde2bf298d29d4308840c47fe8c80d13942e4fb010095f4ae3e02c71934ad90c123fc0721c17b0ea7b6b053c7f7764de8f63d0f064f40443f8c654b00169c9debd89321dd62b5ddd1c560ddc7a5c5dfa03a6e29a2d3e97e0441903a9028a1f3e86986e044fbb3327fac1bd749e701883b3b6472a9ccf1c063270f5e6a44f10acdd281e8880edf5a345ceaa2782f5b61942058e121b88b037bba900463e63023193ada76f995d0fc389c8c8ea56170b63dfacd5a4c126a39370af6d39252f76e68cd3f30099857f5dd79716f89bf8e8d22cb989723ad807bc9a3a2611e819b0ea6f0908eb80bdcf406b355c5aa406b81464cdad037dd59c2065708f972a3d6ee9ac175032cd96d8e31c013133fa03cceb7fbd8d27c20b8950ba86ec74ffe5175212f68f662597f53ffe2fc0de2ab729716ef40a4e676", 0x1000}, {&(0x7f0000001400)="3ac339f3812e07d9bd54490ae4468588b59d8df79a6b31aa2fd71f8b2fc0ff4f66f01582676832b8d506477dc75ed4f78e39c0269fb09d9d6f40ef35b5e17a9b378931dcf883da028bcbcdcbf97875b82ad1b287396428ce27bd7a17da0e29d61836906cc74418e9e28f36a2752dfb79efe8380c58366352f31d0162f7f7bf877ad1476eae51f1d0120b9b0639f61cc1fd594ff5ea0a881144430979c6deae282c1f5a8fca5bcd27bbdf6411b0e7f083d93dccd95b424d8f3f226d4b4a776ff6", 0xc0}, {&(0x7f00000014c0)="e91e9286a9f9d68f3079791af825d71318328b80fc6e23f63d7fcab2bdb5ad2d3977962376b57f1eda3407279bd91556a4443976325eb44b0cebdc6d544f9447a9d7a95ffb0a679cd2ff6acaa0b892a1c13b1478f22dcb68a1a9a33a44cdfd7d009df0770fd52270f22873c8835caeeea10239a1749dd0074812bc0d12fa", 0x7e}], 0x6, &(0x7f0000000040)=[@rights={{0x18, 0x1, 0x1, [r0, r0]}}], 0x18, 0x48001}}, {{0x0, 0x0, &(0x7f0000001700)=[{&(0x7f00000015c0)="1d", 0x1}, {&(0x7f0000001600)="23d7500162489addcdb8e0ef36f6fe3631a7e452580c32098cf3313c0a297961ad4c1ecdd9d462fd152a776ca2521a2f6f368d2d3584b567f7f95a44d15bc493e6f5d85a922dcc185820130a12ce988aea2cefed809c001c9cb462ab14dda1b20a7f5b886ab61b021c46b00c83e76de05ad5d080775423a29bb2b025eff86a6510a0f44d3c9e2d8f9474a82b2f2436ecc704b9ee0f4da3cc5bb0d0ca3e3b34d73f53df66c3ed6d57e9e0e220aa143f881459f0abd5da9e792cc17121a9b20545982aae396830f842", 0xc8}], 0x2, 0x0, 0x0, 0x90}}, {{&(0x7f0000001740)=@abs={0x0, 0x0, 0x4e22}, 0x6e, &(0x7f0000001b80)=[{&(0x7f00000017c0)="3261c866bcef0f0738fe676c4661013f5ed839e583c3f5847fd2565a1f2a2689a5b66391c66e9fab6d6c34558180d981d3ca9dcd0847df4b78659e635fb959a089f96860ebc076cb5019114bea8e0dc80917df90c5aef5d32e13b24a3918f9034fafe06db2841583e3fef80fc400180d45a62682f62b41a33a5fce0216d27db63f764a972e51944deafae86ed24fa66b1913fa", 0x93}, {&(0x7f0000001880)="ddcb551c314ca6e28c135a5d99b874ad7e3fee557098b239914517f36e429cd44b89b30147952c74ce74", 0x2a}, {&(0x7f00000018c0)="4c34aa080a1590198de21f893039b7cf58d73ce929fe5e1b9b03ea82650b17f347ba96f9bdca37d119261c5d5e6e770cc1f68ba8851ab3b460e25df10682d398d15a578748c8422669aab98629a01e3e35bb0b8456b110a8c66750c99d115d4304863c569646bae6cf4d4cf58f2a4e5a9ed6936b1cab0a5cab6456a24a74fc3d665164f5ceed7b9e40d8fd8a60e89cb67572e516c9bf2449840c85066f78bbdb68c2103a46fb825a1f1c79b2cb205aec986ed0", 0xb3}, {&(0x7f0000001980)="07d549102648b2eb135e07712c223fd96c06b3556edeafbfcb697cd4397b0dae9746073bab2308692f3e7148707df8fac453bf79db66427b5257f3fc450943d3d54a49f344665d5e6f13814d", 0x4c}, 
{&(0x7f0000001a00)="139e2e3783ac11cf91fb0815f84887a9bd200f75164cd5a53255fe80645a2356095833eb1b7a3ea507a6caa36ca98f6323dbd8c56662ba8aee03ebb5ec25afdf71e2b76e4ff1aa94d2de83699c9893bb20089506d06da6f38189ff21566520c4dfc56998713c2b67173d1eb4252fee5bdcdee49408b57dedd1398730982e74a73ae875041e4bfe40db1fbbbcd84388f3bbe0f3f811b591bcc2b002736582ed4fc7269090e50dc46448b2a05e2cb795b91952dd79494203578283ecb3060d7b6853815c56c9eaa81c7d88e0664e50789ba578d0666dae0e67a9282c543c4a04cb26c0e3", 0xe3}, {&(0x7f0000001b00)="732d1f48c004203e43ac0bad0f8cd773deb6f15bbf097712c9ed41cbe40e490b52ac0385f109ad8f53de43e2a0440900625145db9337678c9571a3a68ee1c4bcb8efb92c31ccab4125", 0x49}], 0x6, &(0x7f0000001d00)=[@cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xffffffffffffffff, 0xee01}}}, @rights={{0x28, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0, 0xffffffffffffffff, r0]}}, @rights={{0x24, 0x1, 0x1, [0xffffffffffffffff, r0, r0, 0xffffffffffffffff, r0]}}, @cred={{0x1c}}, @rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xffffffffffffffff}}}], 0xf0}}, {{&(0x7f0000001e00)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000001e80), 0x0, 0x0, 0x0, 0x4000000}}, {{&(0x7f0000001ec0)=@abs={0x0, 0x0, 0x4e20}, 0x6e, &(0x7f0000002540)=[{&(0x7f0000001f40)="e8d2c3aa1df1032adfce5660b682c9124dafe604b8e4a2bc2ffd7252deea621647eb8a534511086f67b5d22172fc11888022981b8f991e38109db1789767aeb03b2f70fa8e201198850be5448582a6d66563c7744a238044c526281d4d9450cfb083b967afe305d7e3c928e981e551ec090c1f95fe9838e58dca07", 0x7b}, {&(0x7f0000001fc0)="d5762a1916f1629d709a205186c40bf13c650ba174f11068e0bb6cef331350f4c5ba98f635bf9d833612b7cd47d90dc04a8bfaa0e9a0264e8dacf4469caa7f048e541caac3bb8318f30494fda66f95ada221a40d613f8b398d6b1fc34728b60bbe0413b741b8c0e88927b944c27953019728bd32d12d5ac10749fb3a1c4ee35da8c5f95ba2f4fc8b9454f636d64c090710423ac17e37a815dbe5c2", 0x9b}, {&(0x7f0000002080)="14b6f6", 0x3}, {&(0x7f00000020c0)="8db96c3112b7496d3f825541991555bbe19f7394b82d8c3b36b7254845f4b707f2ef49c150ba8bf81dee63f6476f6e82acd5ca", 0x33}, {&(0x7f0000002100)="6ad44ed168a0d66d72f80e6b05e8a3b997cb8b2bdcdca2be3a3c7cc6e476a7afc38aaf88dd89ce08df595f83035a069a6744c8a995794b9d05af913911", 0x3d}, {&(0x7f0000002140)="ba40ddedd0d4312c127b6e9840a85676a1d46c2ae2b3dd5d6711ca294d38d944a0d203c6a2303175c5f62a94d1398c22ca88178ad68a660de6fb1095e49c03882d9094df04f0aff8743ff228788f2fc31dcdadea68ae928b387d52313c13cb4c6b3b9c21071d52099443cc7cb0e25379c251eebeef8a37fcfd171ed81716de60a904eef5857377d2df1dc1b0baf73a99a16b5f3952dc7651a741b15b16a5694d240436f62cd16a8c20e13b8e00b1968e688b5b315e422d4c756a3858c9cfa5fcd1ff8b790107712f42322cc9c6cd3cc50bc034045431b3231c59fcc74fa8a4d3b70a1d6a153ef2eaa9c490c2d7ede50f6596ac2284ef12", 0xf7}, {&(0x7f0000002240)="75ef1c2193163719bd5f11442a87cc75aa1f5a230d8102a084626f76aa978fd9f0b8bbc0ec1c9dc9f2babf33838ee121797a0c520fbc0dc8bb85abf31c3a82e58b292d091f19ecd1e8ce25ab019026fcec7233553623dab454a0d14056bb9ae9300291594a9bb251e1fdde2d682a816e3defb472268219b2", 0x78}, 
{&(0x7f00000022c0)="51a84ca97ee3f2cb3e8e47c05efc6ec37ede54a7c0d3a2f10b1b725d507383a228951bdf405c4dea47625a5ce894164511e08908e8fa7244a0cd2291e67e89f8c4fa28bef10e954f07fa91c884da3e8e070826a7185e35e45e249c33a84c9480124aabf0c069d5dfa2b3ff99c8c448bb4ad88957aa1369f9625fed40f78c73abaf5448a58b6304568f1b714a9c4dd16acae4c9561c67beb16ddc1e0f5407205558b49909794a7432406077555072ec0345e41d8f9e44f5a05f7a84e729f5cf9661f1bdca5de75ef1b402d347bd51990f7afcfc94e2688e8433a3875eb47b7e477148", 0xe2}, {&(0x7f00000023c0)="36fee72f4cf163b7a12474036c6836098170aae8d0eb701f7bc3b1bc8ad03c4c978a278eb6c7541d78b7da14c76e43d412b52c586058d0d7e8d2f8298bc44fb756b73ef4934902f230777452210a5bd269d5dc190b802709d2fcc4d79c19065a1936860fec385e2e0885a64acd8e6b8a6ebf77a381bce3b742c9c8c604053ec6f1764cc2bfcea0219ed108f8e429c8b27124cd1a3b546af9021724319c7fca09062a0e62af83f177edb845163c7429e01964b5ec1e83fd", 0xb7}, {&(0x7f0000002480)="14971659348f75318e5669ad1e9acd0928c787f7cb364311426f056c388a55c61eb76900778c65949e4432318e05930b0e894c9b825a1fbe4cd7c36769f3dfd68821c9267409d185b0174adaa11b148cc372f8157be9d79121f5b04f92e819109b1f9bfb4127c23d3197056eb563d93bf6344a6cf1b5c7a4dc374bc49010a224aaf64e4a53cefe3fbe4c714d2ab8a50e7df9bab2c13c2a890d75186dfd569201b8ca0dbcf38bd5fb2f26b5be", 0xac}], 0xa, &(0x7f0000002600)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}], 0x18, 0x4000041}}, {{&(0x7f0000002640)=@abs={0x0, 0x0, 0x4e23}, 0x6e, &(0x7f0000003900)=[{0xffffffffffffffff}, {&(0x7f00000026c0)="1168e28167351c1e14a541fa9e14c46a25ff5b6b3074973dff0f482804bba468c47c4acadb85406a6f00e57c3f3efb663876696baf7ee607d7e35bb41dbebd7293c45a8fb3f39d566a2f4e1b737278b8350c23b3f66e9f35fcd21011f640c0ab8af65ef6fd3273291b783fba62d1f226e55e754e67490d03b7c549a06ceb2043dcc882f38c35592ea93c6ae4770b028ec65e0ad2826c069f04d41d567f3d2a99861d639cab1bec98aa32709ce8f114c9d913b858de0a9282e1aab19ea6f3e32cbc5c827261d4", 0xc6}, 
{&(0x7f00000027c0)="37ed7fc6fffa6af83a12b85a677c259f7b0704243244c574fc80e3001125dd4dccb86db4fd3f691ece725c5f75e723632ab2ff9ebdda4b1f04aa925d5fbb1803b21d30d36c6a631dc9a7d715b1370e4e8b19931ba86f96d3fbdb7576b09ba10ffbd420c41dc2ff81fb035ab613e36e73f5a72f4acea986d5f7ee0cec6c619d4aa1ac22b823fd84630a111c4e8193bff43c3847009c3b2a2959da3def4ad9983714f642ad760ac9b1b404c1a58123ea16d4be55f6d6fbd23f4a13626deb46b3d064dae1b7c61fb0b90d56d95d3954f4fcb265316c6fd639d2d8a079d8eea490f5cbf3d4bf80b4b7ff1abba8cfddaaf22bb540fae983736fe9985f8f1b46748661b733d268cd68ecfd420d99bc3c75ec1632fa5f78db7be4418a5c0769a9f9643faf16269f20746c87b22d17e86ec788b788c3ecb820eff530c3aaa9a72405f8c587d4a94ee910a94a5fc2f704aa8974be112f17ea852da68da6d71def83af8ea14f43e4a1d79bfb6cce2a012a3481a4a7c0250222e1a4054a50a3b0ecbdfe56465fccf6c7c1dbf9030ff4f4f72e8741c351e9533d63ea0fed458b79516b9fa7643536d65bc406602a85c40ab28688d8a87f3cfbfd52decf46fc7fe047e6ec93ebfb51fd94f13d3349a9b5c3e053f31e962c18f79f7438b375139d0accfa90be234bced8a4e4270b46f58c96e87723a16305dde914802b3fad5e10b9da4306c56b6b6fa2ccdba959ce5fe7c7d9294a2b4cab589fbd881145b5803599cf166b1f298116726b7a6a41424b5dde747049fd395c069663cab9fb89ea4ed833d50671bb2825abdecceac809e91c89b43aa6e7a277257b9c3115cd06ccd124cbd5ac2fabac90f616e5f026e1a05f31846ba84e09a8546816e69d10f7ecb22a134f12ce71602ddaff624bc3d4bcad0325d3e06180f95e6ab3a20dfbfdaea25153b2478af3e181bdbfa7c707d8941ca12a897aee337ec9527ecb5bda5c05fa3bba4d2897d47da00019bf5260ca495735f42a546088932b9bcc3e77122a37b5a22f42f13dbf434fd11b9cf99bbd8630ec8b5fb984fb5b92ce75df0f506420337be1c10f5bb659488a5f1f4addf4b21fb53afe2d1d9a29c06679c7aee9a3e1c254b21fdc1a90268c0f66dd39ca530c5992d3230a68d18eb282e141b43c19e2c535cd19013ec92c30c6171882b81545921eb6ddc83226fb220e2e523cd460687937d028c1089fd54a8d48051e130f13668e6141c1c361a60c08d191c931b13ea8c9db7dd0e7fd20cff4888b5225c224039b99dad8c957354ce5fbee577c877f6655282bcca6e855a2ad53c44b575ac0a5a7fcdc27059b23f86b42ed4e55aeb0b10c24d9171c3e76626a2a19c6c7a9860724703bf2d057f97fc2e2bec59cb06ef943f2f2819bbf5489f35ca8377b221e2badd0b683b48ea9968c3405abe6e149ff82582b071eba3131ee927c4039d5292ae502699ab7dbf25545dd3def90da7393669c27911918eb275558b6fbf79259a4a2a7a53e7347a81e2c849e90aedef016258d63e22db3a0675666084dfa32a679adacf1bb6444a616454642572f1726fe6d61bda3339d969defead8ab0a33483052611a530db3f7750716594ad39344ac36a4cd60ea44771877833bd30c7d0e1d1c39247b094304b733bf4286dc0f11fd3771a04c93f27ce02bb2e5b76065a3081a90ce126e1d0ed03fde699152232ce5717ca80cd2b22c190e51331430f50de23678cc2721c592de7951c729beb35cb11b74830a643085d47e252d8299595e721f17d35402951ebf3d9569a43429c5707e5f1840779610665534fc0c847afdea2a0bd3193a9c8384b0b9ee569c3ecf65e4332a006e347bf36f052f9ebad931a88791f0871aac4486f493d0c9252f48fcd4e599e3fdb696793ac97d394e350edda18546e8dd89466edc367659b21a65215a6fbc4c71be8c31756490204be236dc396b8fda3e5d5efd17b698c24fecf59ce3723d35e4344006dbe54f249de0ecd77f5aaf353279f7faf9b83778edef4343ea353b63443674cace4c42fb9c21a22fb41539d3728f148b0f7defba18c9dedbd94351d7b144679b1944fd7ca1658abb46b23d6c6d78be35eaa01a65c56e9ed7bdb348b4254294876705a3ecece2b7f74af64c4d62997d6bcf6d7ca90919c0b39a4ae3793d5737cb1302e1c0fef5cf6c6c5b871edc9a8e7ca9438251bb352c0a0f0423ef57b0ffeeaec980b5002246f4d62026fa5868b43ed52dcbcc4e28d2759b7a138c466f3537e39cca4bec445af5cc8a684e72a64c23ae3f74631a97349136cc8c65a20fbb34015877af2fdb454b24c30974276314d20eb35420fabe759d7860fca8f32984b355ceb29054a890984489b0b07c8c536d8b7976fd028ef61264e33cdce8d3e14e61020631aa9368f1f0dbf19fa7ea4cb91111a47112c2200f9171b96d70092d0375e829f9ee60f9303cf3975e8ba8ba6fb2738fd10fe512c05f141328b0e59135b8267426e2d
38bba11e12e52f82af9c847a98b80fcfde0ee69ea42e03fde7aa81eb524dd0fac21db43695c52974b2877aa7d9433e42bda40e8156759d009b39ccddc57b247c9176282dc2c45b4e98f783f76f33e685cd46268592b8a2ade3c326303af6525ad9bf28c6ecf1521642e73cd4d41fe75b1b5d955e5cbdfe4820da257ab4cf9de18815f9d09e6c60125936beeefaf13eefe41297e4ec477a9a35683d73e510efa2c67fc2f69e7175437b078472597d7bf9ca4657edd6cae6713068d3608b3c432116b9897abcb19662f42be84d75bb84c10b5c260ad27625e331bc11ea132ef54489e597b204cf7cacf0644ba0daa78e5e4fcdda37a287b93cbaf955501982cb1fcbd911a0e4946a2a8df8beef09e6115261775f04c4573886cf4c71489a427a29f2d9403a2bdb56f536a8a250d6f3ca6ed87fb1e95400d97ffe742bba7f935996ee621f32682d5d28e3e9e3de7411c21e5a0d416cb9051874871f77de363df2120541e58b5a68d24a74a0b9fe0adb97ee560353c70e80c6d59304275ae0e3ff4451e2234dc03a87a72feb6e6dafc072e0b229164c0f83d77011442356fa5af3634f3174aa4491ddf5de12cb1a30aa037cda1037742f52d868a6f94151170841c199271f0e62cdf55b437b5cdab6cce5b67a2a26523106d2922634317fb868b94915bd98669214fc572062a7572c22daec9e616b977c93f204e835b126c9dff21767c4166532ba508e6383b0fcbff9abaa80e217457eb321983f6418ef42249f47e170751e1ecec5dd42605d9b8fbb6eac2afbe06bc0f2ebe9c57df724e075d9ca01ec3eeb5f3ccb6516f99b64a78091bab2f4535074f33ba64c3f7c4b10e24f0ac9fb1b6441d887c8011b9a009635fdd020ebd099a95e56c9e5068c0c4bdbc65a0c1a49f99c3805796d0dabf09e2f26ace8507389c36d3b22d0fcb923d8c43d1456ea56d9005ac27077953b928ad0d0b0a965b70762f73fb1b90452f6008304dd2bf2e7146581792b5fe3ce15610e636d2f3e757bcc6106162568f601aa44be7ddc740ab4bea1293bc8d5133c60f7489d5bbd9274ae1cbf31cf5dd68c680bc5916c6167fa08f1d77d1c7ca81aa445c9bc6dcfa1fd20292b90e746e43c38f32ac2640a5cd407152615b417a18e9ad6e5aa94077b579434a20799644a70ba84fff881c2ade52b3dafec5e112ed1dc7ed41997dc23e1730879765e6c49a4e772a05d83d4a547f4df1db9aefb2e86313244a072290fdfbb7a5f570482542443106ff0a961260c6500539d62875752f0063aaed9bdf3ae0a65a803ed98c82b93f7b6150f0792dba710961f1cc4db3d459ad2871198470fde28df598fef7361c08cf2d5f28ef111703531b45263025dc1a8716ae08fc9f5a52c671c2f774ebef96042b54f427c35a64418360183c695d22d3560a6e2b01a3886113a7ffc2c0d87645a97ded067e7067c028d448a6817367b889fa193d5975ce15a8e665dddcf22e61762e38cce14bb15746f0c44219b092eb9a4fc8a6c9ddad9a74c49f58769e754100b583c6e1d6c909bc26a40cb76a385718e59ed5a2a791d4288221f37e8b1ce863bedbe31569bd396ed0a1f6909ae931d213fab2a140b16775d1db7da60aad07a2610f7472e9e7157a1b7621de3efa1bc8aec99dcf70f50fa291b69bc47d8c9d04c8b18a952eac5dabf0d78fd7fbc25a26919626807ded2c6d25e891fec2f84a5ac822d0c98d865e0d06519f019a7ce5d13c1d3289a7fc70f26baee7f4e3988027d753a79562dbaa5c419db77ecde101d3b15b2cc4f92d176e932cab7af5a0f318c5afdbbb0750af698dfbb8eb6ee6825040d95d7cade463dcb81a770789a4b48a49298b84b18d61caa7224634ecd8653369158f88a85c13b99d7f255cbefb79c9628f73f2b3ef40560216d21f8513ff17bc8faa11c2a1bc25b2040dd23826916ddefc1e7631c508a6f04da29ec3e8b8af40b9fab617fb7648e23fe4ba20cc965813b9c7f2129312c2b74cf367547907b1e3d9c4edb633088f11601e2d868dfaa4e4d8a7ef834af3d483a3a4b88c6acd37818befa2c6b544c392b472de4e25c73264dbeba5ce6fc9c65cd7d1f411e743517d5920cb81337bcf562c94e332a07bf0684e7df8d9ac36471ae12416f546436dc3b78d883cbd5be836ce1f70bd494352c32b89e736028f083ae0df7fd4a3c864324a20d99e944548678532d4f43e14e1792fea757f0000a903e840542110ba639245c8e5d778fe23c21b0d36caf004b92891d9e9ede9e073d2dae8827e915283378d90177a1503e856c4d7ab95cbbd7c05c20d1b7982ca916e5885803af0b08c861c559345466eef0ccd2994cd245ee6e70c9a45686f42670c2459954c0b67e521b982a49fac07d76110c292dc967abc34aaba9bb6bff02f331d188865dda6e043ba7b4f81577376f677222fd474ce8c1265a0585fff23146489450cc880f8db2d167d2c5635e49ba2da7edca21989a560f1cb09b397ae9745780ad2de4e6929b905
1deeeee94fc7fdb6a6a1a3934a0b1e4d4cbb3499684bc0c5a51a331770d4e8eaac07d56370b5f0ccdfa1781efe91870213ae5ffca24c9a0686cc51da1050ffabb99172f0f26c57eadd23eda13892b63ada2ac181d540c07999db18ccc4b5155556d2170458739f7e54de2272aa1833134998672a7e90e30eb04691171bfd55623ebaffdc5e83abe6600f24ee7bf9b511a10720a61115115201156d3258c8fb769543a27b28dfe731811791c5d4c97595ab3694e00309ee343048a6dbae364cdfbb7c3d73e5a23340f050aff74a85dc6130ac158afe63c39816e5188c240e450d1bb853d5470654494365d95ed80b94f4088fbf69f79ce79641d355aacb6d7e25de813dbb26bff864a59060d8f24e0261f5a038e2bdd36ba16e320b71cbcc960def1270785cfb0033a4f340fb4ad4b0d48f7a769e29cd8dc2c0f188bb707beaa7ce95e1dbcbf8782feab86dccca789f7da3933ea8370d4e7c01d269300ffb4944c217e71d6a0b0ee0ea56245b1e87e19f3c8cd5a70c93547ffe2982ebd6dc6253ece72cac73e61515e3a485a5f9942c3fd8bebb0086626cec3a8220df520d3f7df7787f1fc9d3301f89cc0671ec8059d460822d82effbb9d0b400a9e4480c5705ac264b97c959cec6e0c5136357b3f959fa34ff070453fbb9c12a415f83d1a131f575f384790a31ec2de30a76730b1ae0528427963d542badde90714143fac51698a73cc64159acaa281e69d067001739c095402eeb5adc10ec0a6e06b835087db176f8ac58b34a", 0x1000}, {&(0x7f00000037c0)="a7dafddd3137664e12948dc084463f6c7338d677ea2d00e6b05a39cca70c9e630b1260d06cb5b1812c907311718201c907d7f6847ebc6b927c8d7034c3f41da466b43c", 0x43}, {&(0x7f0000003840)="fff6b050682bd1b4fa1d6d943007718da612ef465dfd5f33217647817d5edf76014ce4cef90ded9be2347e95f6f93fe6d04aaeae6cde4c490f2824587109481aa8b9fa555f8a4dcafaa198ca2702d6b0a7788785f58f790eafd67253fc7c20a19d1451ca28ac687f0d71dd2e61ad163814fa115e8ab0395bc8e619bc5938e34339161b04998b5d32029a16633f6e82af23f5ff5075", 0x95}], 0x5, &(0x7f0000003a80)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0xee01}}}, @cred={{0x1c}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, 0x0, 0xee01}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}, @rights={{0x2c, 0x1, 0x1, [r0, 0xffffffffffffffff, 0xffffffffffffffff, r0, r0, 0xffffffffffffffff, 0xffffffffffffffff]}}], 0xd0}}, {{0x0, 0x0, &(0x7f0000003f40)=[{&(0x7f0000003b80)="70b377c70387f644a405f4f2e35e721b7ba54c302fa4cb17a4bc8766e7374451fccdc1a1c6078e2fc17047", 0x2b}, {&(0x7f0000003bc0)="e6125e16d7a8df93b40f4fe2a2f1680aa50a616a96a529701dd568d52b5977a804fd8c0fd5437bd7e61314cb97c9a47e06edc44d096126476b022ab6ebb41765134232aec9e0ceea89ffd2787bd9f8e3", 0x50}, {&(0x7f0000003c40)="d6447e435b1de4c192b66a27a92b8b7585e864a464", 0x15}, {&(0x7f0000003c80)="129d885b801261fc14813be0a34b56d6978c91ce3f", 0x15}, {&(0x7f0000003cc0)}, {&(0x7f0000003d00)="eb19896a32f2d21ea6de7080c4d23e451c283764f16ca7ca0da11ee4", 0x1c}, {&(0x7f0000003d40)="f0b456d22c3170eb7297c7b2f3d042c7b6b39636a8a80935cc2c3e0c455da9a259260c91be5e35b7d8aa172375304515e081cc995206c07b9c77188a9fa4c077547d58994864e8b45d1378274b33063c1409a5e9537742343a604b203d606ec24b9694bb4bfad975fbb2c37c70137ed0e4818ba986a293ecb1b946193da9bcbb2efc24009ff0ee098d6df07ff2eb7c39a0eee42faf94e96435176ad6200ed15203ac993743e46a80736f7b25c69162f3eab0b453d787adebb9210b22ba11e6040ba7ffe97d53d4addeba80fa29388171c595b0c5065273f3875a254c0d9dd579953fda006e7cfb768d5abb3b6531116b9f51726d10b840", 0xf7}, {&(0x7f0000003e40)="e779fbbd622064ca2c25a9abef22da29971cec5f862e42a2d40ae66af52a2458448f496655aafedee5b7664ae8c21119267849cc7788ae2b1332e890d866c36da6f78e6827c1e059c9ca969647b7b9e8fb343232cbea9e39243bbe3fa33ba32aad6947033b832de2a25b53abfb95231299b7e640f5aef240da130e53c6fe9e162d555a223bb24500319269212dbdaea89f718af02183beb5f268a5a5e4e0edebdf0cfa9737ad6844b9fbd46c1b4f4b6d5cbcd3eed073180a3ac9f8b42c4aa4c4568e84dd690bd27e65c3a2ba299f54c3", 0xd0}], 0x8, 
&(0x7f0000003fc0)=[@rights={{0x14, 0x1, 0x1, [0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r11}}}], 0x38, 0x4000000}}], 0x7, 0x20048000) (async, rerun: 32) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r16 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='cgroup.controllers\x00', 0xb00000000065808, 0x0) (async, rerun: 32) r17 = socket$nl_generic(0x10, 0x3, 0x10) (rerun: 32) sendfile(r17, r16, 0x0, 0x100000002) (async) openat$cgroup_ro(r16, &(0x7f0000000000)='cpuset.memory_pressure_enabled\x00', 0x0, 0x0) 01:55:27 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1601, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1978.084243][T28267] bond1023: (slave bridge987): Enslaving as an active interface with an up link [ 1978.100479][T28271] validate_nla: 14 callbacks suppressed [ 1978.100496][T28271] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 1978.189433][T28271] bond361: entered promiscuous mode [ 1978.195331][T28271] 8021q: adding VLAN 0 to HW filter on device bond361 01:55:27 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, 0x0}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:27 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='cgroup.stat\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) 01:55:28 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='cgroup.stat\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) (async) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) [ 1978.359894][T28277] bond971: (slave bridge929): making interface the new active one [ 1978.379482][T28277] bridge929: entered promiscuous mode 01:55:28 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='cgroup.stat\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) 01:55:28 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xac01, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1978.405189][T28277] bond971: (slave bridge929): Enslaving as an active interface with an up link [ 1978.508313][T28280] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1978.599430][T28280] bond919: entered promiscuous mode [ 1978.607118][T28280] 8021q: adding VLAN 0 to HW filter on device bond919 [ 1978.654813][T28282] bond919: (slave bridge886): making interface the new active one [ 1978.668274][T28282] bridge886: entered promiscuous mode [ 1978.679665][T28282] bond919: (slave bridge886): Enslaving as an active interface with an up link 01:55:28 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xd0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:28 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.bfq.sectors\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000100)=ANY=[@ANYBLOB="2321232e2f66696c83fb09f8d24bbffc814d70e55f482fa0ad7448d087d09165226114ae12056c63ebf0bfa0a6b7bb4e1fc2cace7c5ca05e9e49c32e506ef6f5d59f2b3da1a50000000000"], 0x4a) ioctl$PPPIOCNEWUNIT(0xffffffffffffffff, 0xc004743e, &(0x7f0000000040)=0x1) [ 1978.705968][T28308] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
01:55:28 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.bfq.sectors\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000100)=ANY=[@ANYBLOB="2321232e2f66696c83fb09f8d24bbffc814d70e55f482fa0ad7448d087d09165226114ae12056c63ebf0bfa0a6b7bb4e1fc2cace7c5ca05e9e49c32e506ef6f5d59f2b3da1a50000000000"], 0x4a) (async) ioctl$PPPIOCNEWUNIT(0xffffffffffffffff, 0xc004743e, &(0x7f0000000040)=0x1) [ 1978.799066][T28308] bond1024: entered promiscuous mode [ 1978.829401][T28308] 8021q: adding VLAN 0 to HW filter on device bond1024 01:55:28 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='blkio.bfq.sectors\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000100)=ANY=[@ANYBLOB="2321232e2f66696c83fb09f8d24bbffc814d70e55f482fa0ad7448d087d09165226114ae12056c63ebf0bfa0a6b7bb4e1fc2cace7c5ca05e9e49c32e506ef6f5d59f2b3da1a50000000000"], 0x4a) ioctl$PPPIOCNEWUNIT(0xffffffffffffffff, 0xc004743e, &(0x7f0000000040)=0x1) [ 1978.918860][T28316] bond1024: (slave bridge988): making interface the new active one [ 1978.938084][T28316] bridge988: entered promiscuous mode [ 1978.955703][T28316] bond1024: (slave bridge988): Enslaving as an active interface with an up link [ 1978.972252][T28317] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 1979.034133][T28317] bond530: entered promiscuous mode [ 1979.044040][T28317] 8021q: adding VLAN 0 to HW filter on device bond530 [ 1979.069344][T28320] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 1979.126865][T28320] bond362: entered promiscuous mode [ 1979.136318][T28320] 8021q: adding VLAN 0 to HW filter on device bond362 01:55:28 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afcffd8d06e01000081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) 01:55:28 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) 01:55:28 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1800, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1979.175076][T28321] bond530: (slave bridge470): making interface the new active one [ 1979.183212][T28321] bridge470: entered promiscuous mode [ 1979.192977][T28321] bond530: (slave bridge470): Enslaving as an active interface with an up link 01:55:28 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, 0x0}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:28 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) [ 1979.271276][T28331] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:29 executing program 5: openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000040)='pids.current\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) [ 1979.380653][T28331] bond972: entered promiscuous mode [ 1979.394409][T28331] 8021q: adding VLAN 0 to HW filter on device bond972 01:55:29 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) shutdown(r1, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0', [{0x20, '\':.%)/-.:%'}], 0xa, "8dddcab91762e11845234f867be78db87b097205f9b3e0923becdeba06204cf859f44e38c10985f2aca5184c3c7edfd3d475ee0dbd7b5de5e8b206e7cf5e71ef55196a5c41199f067f7b413d8f417cfd2f17185a791062571868b0a87026ab04134d0f56108eab218431d714"}, 0x82) ioctl$sock_kcm_SIOCKCMCLONE(r0, 0x89e2, &(0x7f0000000040)={r1}) sendto$inet6(r2, &(0x7f0000000180)="7162450c1c6963a25f91bd0ad2e93bcc3b603c26df29321e0d0effe5c9e2d1eea6f98273df33", 0x26, 0x40016, &(0x7f00000001c0)={0xa, 0x4e21, 0x8, @private1, 0x1}, 0x1c) sendmsg$nl_route_sched(r0, &(0x7f00000002c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x8}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)=@getqdisc={0x2c, 0x26, 0x300, 0x70bd2a, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, {0x10, 0xf}, {0xfff1, 0xb}, {0x7, 0xfff3}}, [{0x4}, {0x4}]}, 0x2c}, 0x1, 0x0, 0x0, 0x20004001}, 0x20000004) setsockopt$inet6_MCAST_JOIN_GROUP(r1, 0x29, 0x2a, &(0x7f0000000300)={0x4, {{0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x35}}, 0xffffffff}}}, 0x88) [ 1979.509237][T28334] bond972: (slave bridge930): making interface the new active one [ 1979.517513][T28334] bridge930: entered promiscuous mode [ 1979.528829][T28334] bond972: (slave bridge930): Enslaving as an active interface with an up link 01:55:29 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xba00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1979.589384][T28339] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 1979.672403][T28339] bond920: entered promiscuous mode [ 1979.677915][T28339] 8021q: adding VLAN 0 to HW filter on device bond920 [ 1979.727776][T28343] bond920: (slave bridge887): making interface the new active one [ 1979.741978][T28343] bridge887: entered promiscuous mode [ 1979.759769][T28343] bond920: (slave bridge887): Enslaving as an active interface with an up link 01:55:29 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xf0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1979.771513][T28354] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 1979.832259][T28354] bond1025: entered promiscuous mode [ 1979.838080][T28354] 8021q: adding VLAN 0 to HW filter on device bond1025 01:55:29 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1801, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1979.893793][T28362] bond1025: (slave bridge989): making interface the new active one [ 1979.902470][T28362] bridge989: entered promiscuous mode [ 1979.914409][T28362] bond1025: (slave bridge989): Enslaving as an active interface with an up link [ 1980.002389][T28358] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 1980.065462][T28358] bond531: entered promiscuous mode [ 1980.076027][T28358] 8021q: adding VLAN 0 to HW filter on device bond531 [ 1980.087547][T28364] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
01:55:29 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async, rerun: 64) listen(r0, 0x0) (rerun: 64) accept4(r0, 0x0, 0x0, 0x0) (async, rerun: 64) r1 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 64) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async, rerun: 32) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (rerun: 32) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afcffd8d06e01000081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async, rerun: 32) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) (rerun: 32) [ 1980.142636][T28364] bond363: entered promiscuous mode [ 1980.148477][T28364] 8021q: adding VLAN 0 to HW filter on device bond363 01:55:29 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, 0x0}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:29 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r5, 0x0) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) r7 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000940), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r7, 0x8933, &(0x7f0000000000)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_REGISTER_BEACONS(r7, &(0x7f0000000100)={0x0, 0x0, 
&(0x7f00000000c0)={&(0x7f0000000500)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16=r8, @ANYBLOB="0700000000000000000002000000080002002e02f00008000300", @ANYRES32=r9], 0x24}}, 0x0) sendmsg$NL80211_CMD_SET_MCAST_RATE(r6, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x48000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x1c, r8, 0x8, 0x70bd29, 0x25dfdbfc, {{}, {@void, @void}}, [@NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xa}]}, 0x1c}, 0x1, 0x0, 0x0, 0x8800}, 0x2400c804) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1980.330398][T28377] bond973: entered promiscuous mode [ 1980.359495][T28377] 8021q: adding VLAN 0 to HW filter on device bond973 01:55:30 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async, rerun: 32) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async, rerun: 32) shutdown(r1, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0', [{0x20, '\':.%)/-.:%'}], 0xa, "8dddcab91762e11845234f867be78db87b097205f9b3e0923becdeba06204cf859f44e38c10985f2aca5184c3c7edfd3d475ee0dbd7b5de5e8b206e7cf5e71ef55196a5c41199f067f7b413d8f417cfd2f17185a791062571868b0a87026ab04134d0f56108eab218431d714"}, 0x82) (async) ioctl$sock_kcm_SIOCKCMCLONE(r0, 0x89e2, &(0x7f0000000040)={r1}) sendto$inet6(r2, &(0x7f0000000180)="7162450c1c6963a25f91bd0ad2e93bcc3b603c26df29321e0d0effe5c9e2d1eea6f98273df33", 0x26, 0x40016, &(0x7f00000001c0)={0xa, 0x4e21, 0x8, @private1, 0x1}, 0x1c) (async, rerun: 64) sendmsg$nl_route_sched(r0, &(0x7f00000002c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x8}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)=@getqdisc={0x2c, 0x26, 0x300, 0x70bd2a, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, {0x10, 0xf}, {0xfff1, 0xb}, {0x7, 0xfff3}}, [{0x4}, {0x4}]}, 0x2c}, 0x1, 0x0, 0x0, 0x20004001}, 0x20000004) (rerun: 64) setsockopt$inet6_MCAST_JOIN_GROUP(r1, 0x29, 0x2a, &(0x7f0000000300)={0x4, {{0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x35}}, 0xffffffff}}}, 0x88) 01:55:30 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) shutdown(r1, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0', [{0x20, '\':.%)/-.:%'}], 0xa, "8dddcab91762e11845234f867be78db87b097205f9b3e0923becdeba06204cf859f44e38c10985f2aca5184c3c7edfd3d475ee0dbd7b5de5e8b206e7cf5e71ef55196a5c41199f067f7b413d8f417cfd2f17185a791062571868b0a87026ab04134d0f56108eab218431d714"}, 0x82) (async) ioctl$sock_kcm_SIOCKCMCLONE(r0, 0x89e2, &(0x7f0000000040)={r1}) sendto$inet6(r2, &(0x7f0000000180)="7162450c1c6963a25f91bd0ad2e93bcc3b603c26df29321e0d0effe5c9e2d1eea6f98273df33", 0x26, 0x40016, &(0x7f00000001c0)={0xa, 0x4e21, 0x8, @private1, 0x1}, 0x1c) (async, rerun: 32) sendmsg$nl_route_sched(r0, &(0x7f00000002c0)={&(0x7f0000000200)={0x10, 0x0, 0x0, 0x8}, 0xc, &(0x7f0000000280)={&(0x7f0000000240)=@getqdisc={0x2c, 0x26, 0x300, 0x70bd2a, 0x25dfdbfe, {0x0, 0x0, 0x0, 0x0, {0x10, 0xf}, {0xfff1, 0xb}, {0x7, 0xfff3}}, [{0x4}, {0x4}]}, 0x2c}, 0x1, 0x0, 0x0, 0x20004001}, 0x20000004) (async, rerun: 32) setsockopt$inet6_MCAST_JOIN_GROUP(r1, 0x29, 0x2a, &(0x7f0000000300)={0x4, {{0xa, 0x4e21, 0x0, @ipv4={'\x00', '\xff\xff', @dev={0xac, 0x14, 0x14, 0x35}}, 0xffffffff}}}, 0x88) [ 1980.541316][T28378] bond973: (slave bridge931): making interface the new active one [ 1980.559294][T28378] bridge931: entered promiscuous mode [ 1980.602475][T28378] bond973: (slave bridge931): Enslaving as an active interface with an up link 01:55:30 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xba01, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1980.724078][T28382] bond921: entered promiscuous mode [ 1980.775258][T28382] 8021q: adding VLAN 0 to HW filter on device bond921 01:55:30 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xfc, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1980.856077][T28383] bond921: (slave bridge888): making interface the new active one [ 1980.868835][T28383] bridge888: entered promiscuous mode [ 1980.882633][T28383] bond921: (slave bridge888): Enslaving as an active interface with an up link 01:55:30 executing 
program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1a02, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1980.897370][T28385] workqueue: Failed to create a rescuer kthread for wq "bond1026": -EINTR [ 1981.265592][T28401] bond364: entered promiscuous mode 01:55:30 executing program 5: bpf$MAP_GET_NEXT_KEY(0x4, &(0x7f0000004000)={0x1, &(0x7f0000003e80)="db3655c7decac14b9ef20513f0f48b1838d81c53b6c2e9c4399bf698dc473ed4932d6613881a4e8562aa809ff93ee83d0edf8cfe2fbad1505874395d481e046eb6a72d", &(0x7f0000003f00)=""/220}, 0x20) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(0xffffffffffffffff, &(0x7f0000004040)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}, {}, {0x20, '!\x9e-\x1c'}], 0xa, "b38b8319a2197289e69c0e6810b151343a89de65f352c6ae8caafe008b6477dd4f981de745e44805a5bc03e1d82fc9d38f9e1ecf21fe08a14d4756e06b58c574324d02d7ab360c20f02d252278a6bed5899d3dcc856273a6ba5f32c0ff858ccb6a0fb9d46cd308d701f646973ea1a6ddd3d06dc6107cb27c81b48e6b2dc80d61048173411c970ed04e51ae3c0eed596b1571a03188776d50ef739471d03e64ddf4d9245a23d787dd3c095e80abf5e29189c750943b37f64fbe922771f910c4939bcebb10b83afa0a9a22fec1296570cffe60"}, 0x101) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="00000000aa7809d8560000"], 0xb) recvmmsg(r0, &(0x7f0000003c80)=[{{&(0x7f0000000000)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @initdev}}}, 0x80, &(0x7f00000002c0)=[{&(0x7f0000000100)=""/145, 0x91}, {&(0x7f00000001c0)=""/217, 0xd9}], 0x2}, 0x1}, {{&(0x7f0000000300)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @local}}}, 0x80, &(0x7f0000000680)=[{&(0x7f0000004180)=""/171, 0xab}, {&(0x7f0000000440)=""/243, 0xf3}, {&(0x7f0000000540)=""/4, 0x4}, {&(0x7f0000000580)=""/218, 0xda}], 0x4, &(0x7f00000006c0)=""/148, 0x94}, 0x4}, {{&(0x7f0000000780)=@sco, 0x80, &(0x7f0000000b40)=[{&(0x7f0000000800)=""/129, 0x81}, {&(0x7f00000008c0)=""/237, 0xed}, {&(0x7f00000009c0)=""/242, 0xf2}, {&(0x7f0000000ac0)=""/38, 0x26}, {&(0x7f0000003b80)=""/60, 0x3c}], 0x5, &(0x7f0000000bc0)=""/242, 0xf2}, 0x400}, {{0x0, 0x0, &(0x7f0000001f80)=[{&(0x7f0000000cc0)=""/66, 0x42}, {&(0x7f0000000d40)=""/249, 0xf9}, {&(0x7f0000000e40)=""/23, 0x17}, {&(0x7f0000000e80)=""/4096, 0x1000}, {&(0x7f0000001e80)=""/243, 0xf3}], 0x5, &(0x7f0000002000)=""/174, 0xae}, 0x10000}, {{&(0x7f00000020c0)=@rc, 0x80, &(0x7f00000036c0)=[{&(0x7f0000002140)=""/4096, 0x1000}, {&(0x7f0000003140)=""/132, 0x84}, {&(0x7f0000003200)=""/55, 0x37}, {&(0x7f0000003240)=""/163, 0xa3}, {&(0x7f0000003300)=""/222, 0xde}, {&(0x7f0000003400)=""/167, 0xa7}, {&(0x7f00000034c0)=""/177, 0xb1}, {&(0x7f0000003580)=""/85, 0x55}, {&(0x7f0000003600)=""/178, 0xb2}], 0x9}, 0x800}, 
{{&(0x7f0000003780)=@nfc_llcp, 0x80, &(0x7f00000038c0)=[{&(0x7f0000003800)=""/17, 0x11}, {&(0x7f0000003840)=""/102, 0x66}], 0x2}, 0x80}, {{&(0x7f0000003900)=@ll={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @dev}, 0x80, &(0x7f0000003b40)=[{&(0x7f0000003980)=""/243, 0xf3}, {&(0x7f0000003a80)=""/30, 0x1e}, {&(0x7f0000003ac0)=""/71, 0x47}], 0x3, &(0x7f0000008700)=""/233, 0xe9}, 0x8001}], 0x7, 0x40002022, &(0x7f0000003e40)={0x0, 0x989680}) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) r3 = getgid() sendmsg$unix(0xffffffffffffffff, &(0x7f0000000580)={&(0x7f0000000100)=@abs={0x1, 0x0, 0x4e24}, 0x6e, &(0x7f0000000400)=[{&(0x7f0000000180)="94942c3d1e007dfb8404de29a8697799b1f5d6823a70813d4cc3415c6f862e8ceaac7242aef16f9f7c571f15aaacea204d20b49c43182fe1dd3de88c4a06101fc1f8d6139579492cca024fe7db0bd605ad17f17bfaab7d62fb0b847e05f9c41fbfaf79a513efae1ba322990f1327d42eabce0b83ee4fb2b875a3c4f9a1b2", 0x7e}, {&(0x7f0000000200)="993ccb04b5af9377cad757d9dbbe8345526644635ab0ecc50c5c9b41303e1e1f5b1f6161ff3f0a61f3f51dcf5eab537b55b5db80ddea43032815b7908ef405941077ae8e58627fe7265438edb56ef1b6918735c74b3b8fb318d24c30d06cd07d15f385dfd52cd11a49d23837a38ef8284140bcc827accc91e3fb964378ab5da48352949a0f4b27797b96b083028f2f6bdb579e6c1ea1809c644b8e841bb7bc0eb312d29e9fea73a71744649b830f244576a3b1b8f50150c6379a7ada43987439be4e1258efbf5d325ee5f0ad6c9d909bd73a187d299cd9d782beb7a8b2524cf2b61d2dba7e4acf6764b73c9a034907cdd5b7f547", 0xf4}, {&(0x7f0000000300)="130127c0749a951379b88d7ac86bd00a069d3e5793db16848cac09380ca3c6045e088493f74bbdd96015c04cc03eae1802359cf0a739df19bbbc910c3256b1724713e6e5c4be6c2fd26afc35a60e33dc091785fd017c569eea7264d1416c4ee26bc35c2a3ee4c8f285c9da4f7d78ed6613140dfff54f048b51827b8380edffcbbc154571185532f83a58dcf55a3657ebb73d8a261228568bf32c1e5ed7a414a2f8b30a24d5b952ae26d33311c4d23fa6db921a7464444692273f8476e09803bb860b51baeabc34a7828152bba3533e16df0300294a425b07d1a38b122b7ca71b5dfb620963", 0xe5}, {&(0x7f0000000080)="24c7145919fb2421ddb6ab6620204c0441c838579e3bbb9693a8c127c2c88f33b33f1766e6b65d233d3216d16c5ebbe3342d", 0x32}], 0x4, &(0x7f00000004c0)=[@rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x30, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r3}}}], 0xa8, 0x40}, 0x20008840) getsockopt$sock_cred(r1, 0x1, 0x11, &(0x7f0000006c40)={0x0, 0x0, 0x0}, &(0x7f0000006c80)=0xc) r5 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r5, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r5, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) r6 = epoll_create1(0x0) r7 = socket$can_raw(0x1d, 0x3, 0x1) r8 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) getsockopt$sock_cred(r1, 0x1, 0x11, &(0x7f00000083c0)={0x0, 0x0}, &(0x7f0000008400)=0xc) r10 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r10, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r10, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' 
\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) r11 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r11, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r11, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) r12 = accept4$inet(r0, &(0x7f0000008440), &(0x7f0000008480)=0x10, 0x100000) sendmmsg$unix(0xffffffffffffffff, &(0x7f0000008580)=[{{&(0x7f0000004240)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000005300)=[{&(0x7f00000042c0)="0bc0", 0x2}, {&(0x7f0000004300)="adf4e18f1d9b8ab32a273f0aaa8b3e31b9681a893b97a134a07ef6f9b91dba0bb7ccec423828d8d1cfffcee97f775de89a9fa4387637ff8fd0d6f7f32d68eecb3c982de2fd17320d29ceda7988a293074f7021ec1d3e88285d314f9721d44af57b474ae56abc732c9812e478d357af5eef0b08cf1fde50185e220dffe3857ea3a46f0f78ccb4d1f7d2c7956f9ec30c415694119ff86f4db5692a5074ddb6daef178d7ce5e01f611ba54ab000a007464766fa4e3845ff3ed9f8a279cef6ae088c5da94183e93cd4efe8d4e21cddf5b85fe76bd3657d7db9d8896be7b65e8e5723dc650008532549c0420aedc65c217e9281113bc3ad3dc8ad593a0b64b613f47fdd3d36e46b32010b124dfa0270f274d788a052f2b3934f25292b57954fc09a51bcf7828e6c3db7ad9d3d5d8bd162b71c5a2564e62186e0c96e7e8d1308ed264fa0fbcccd90b2095488ba4939ee7b52b156536fd15856d2b4abbe814ba479d03e62888785fc3ee003b98a197c98f0b4c7af6c58c3f3830989d97a3fca1ecc9c739d225c39e3e3e7d97f58a7051f6f9c7bda689913eec1ff028657a6e7d81265a776a24d34e62834423614952778c4da2e96b03387b2d35979282b8f31fcca7770afd761b94d789dc27ef7bd76b74266ade0ec1efe302a93a67f3fb2256a2833bb464ec16308c9cd7ee5fb6d2c129b1d746e1e43bea8ad87e06522fef027da4bfd108113dcc86ad519365b7b31b9308252f8a6d688d2036347d240d9892a5620f002c1de2f3038ee984749a0501eed904d1ec2ee4a0a84695bcf9cf2145b2ae86a3697e0a06ac3ac804a7166c08bb56a46d36e6f9e0862352e8d8ac50a8c4cf900a637e28c6ce20afddbebc82cd6e3aa89a67d12c31d9d728181b084d39953fb6b939caf91eccc54d888a863bf417bb53713415ad370e273b7af774cd46d0d3ad914074a962a20ce28d160d12e8a6c1ab417978f6b736f50917386acb7fcc2b95575e0959d442600f18c0ca36288ac29677bb7a681989e90fce58cf6a1fc35e50a1def65eab20222666e15ae51e9766a895058edbad2567e36a9356f4be2ed8a3502eb0423dd0ac92d2b15239ef86e21b1f85eba58dc0743cdbcaf2a72dfc1bcdffc3929bfc5a6f34b1bb45cb77fe2b5085cd5265cade91177032ffffedf75106cb31fca426f5345ac66bc624e35d9bc6a7c725a8ddd337105ba56d61dd3803447840a53dbd1096d477c878d36fc002482a1f82a9620bb073328338018ed71346781280b7e00d54a10bedf3b5535516f5591f882d018cc95c35716bc6bad68cb5533df0eeb867937fc72f9b03b4fc0bf902c83d8a2edfdd579e752ab75a75aaa7c3d9f1c8baa53720a1b8d0345a7b08a69c2cea389214dafd96bccd6c22f251fc689d8f08a657f7710548c91161098b1bf73b70353acb205a8bd14fc87634f1ee02c9e8cd4e20798b54b3e5de810bd1b21e53573b6f73684f258ba461f686fdfd089591807d179c9c2c57926e1b4456f9ccb1a375aea584d728acedac6dfb29efc561e382f257a68f673715b1199fdea7f8bf6de73eff734aacf64ac8a2981d7cceb904f22f8b24ca76136878f4d55d22cee5a9225de345140875511b78c1df1264380285ace0e534f7cc9849e057dbf5329707316036a3019acb0f396801dbba6ec7198ed93c44de035bed7472017af9ec4a37e174ebba8c48637033c8749c165c80f0f4585f7fab064b07e825852e450fa4f6239291dd4f4ba9f609c98a21c1e05715f8f5576263881cf8b183de45d3207e4ab616dd9372687a161fe234b72c5c7bd4d2d747adad6098e4387f37d033678bc09004433980c3cf7f659abca1390017bd3b7dcf72810fefc1ccfcb91d65a8830dab0a56176ae4fbc1b22179a508d8c3105ea5c0cbfbb87391fc33d4a3cee5444d80b85a4b1112ce3bc3eff2432ec6a8f1abb7db9245fbff203a5f84a33cf2ba5e82b2a3b8008568ca867500935cf66171a58ccd1806df4e40c909a42264efc6addd544e086577e7bc7631416ea7272f3b2e269481797d60eff296f3c87381847fe762004ca07360bdbabc8545e34a9177e4f21bc03254c146e003
daccb625df3a530008c2a23e4e5c86cf48dba657905ce630e742d93d3f5f595ee08f8ad168841775ceb397e4f3d3a3880093a901216d032dba4d5a92037dcb1bfe2dc9f48a0603a2b49f4c128b404ebe29efa41d69447b353b73eb4993f3c9d0a3533d55fa405f4048cd194dcd5be4bf5ea58603379ba40301d72ca3be0d22cbbc85ac3448f84b19c61b57cd084c083125b2a688ce119bb121e59fc5089cbccfb1a451822dd4611f201ab7d1905116b14d71bbc2843adbaa5f3fe45827d6d5dadc0a600ea9e7204b019440a3cceaaec42383f7c6c34990350acd7f9c8da8d6161fe9576695067a2da9ef2e6fc839828aa460973c8676af7296dac2c3ebe9a28019a814d8f3b3d5c8373fc8a7b5dcbfb1d2fd30ee3854e743f54dca7ec6883cde9b3e0e3d6ed1cd1ce3f8ed944c78c75b3aaaee41eaf76d53ba9cc6e3c26855a5030a8fe33902e1fed83685ce1237464fa8048a5ae58e5e53225fa5971a7375d3dc6eff49139ce71d028abf853f9dcd44027533947be345b6a2bc5536367eefc30521d6045dfdb27196743f96b9cdff6772d8e228192bdf92c7737be0b130cb4011692a32495e15eefab2a96f346e376960eab4794f59523091b11aa7936d5dcfdeb32a4ed03f4531c1ba24d019d9f85f7fd4d3231f089cf1170d5cb445c92e7367e2693c145401fa701be03dc84188d8694eb0006830ce45bfefdb1d8ff289173dd153519b1939433a97603b8e625eb32366ae864a9940a2d03e1f06e87ed663d447dcf78c05b9254c5ab5a23b3cc97e3d42653d788cf8519e5d765e843f0fbb23be2c72c1d2697da06b1037a17d24dafaf645116b1df4fdbf1a9b5704f24b210c5e3a5985489e6725c48df3e3a132ceaf82c68fa609cdde6febc7fee498fcadd38d56ac189359047a4a914919da06f066be22f761611b6ccee116431484d9ed953ccd6c2e1cde8432138607310feee52fc5868376c4935152d5fc4d52fe4eeaccae40aeb9ae99ab0daafee803882cac7554c661ea84324708ee6de2ed4c95b4d52c192c2b6a22630c7eda55005dfa44361e7b275db00523e01fd0f8da85114a7cbac0dde4c94ffe6077b17b7ea6b3d698f9a3e79c66b398831eff8802544d3acd8365586d7ae4524720e1dbf19742f9e7cc0ea0312c3ab3f2cbf7334b0142d96794a8fa33e2aaa230cccd4e8816c9b42bcafaa2f96144f048632efc1153fd3e831fcf38d540d63c2d87b24123c4a41e1e3d4c62be6c9c83a4e043448c207fde0cadb025516316b1d74d800467bbf43748c1df31a375e713a1313e53b17f8f19fae6916bfa435ff3a344af287a5ac481f21f87b2fa8c6ff0d3d7aaa08ab6d3cb541f0e9a106de1a5dc126b3beb2042415ff032cc820f554c5752d12566e35b31ce4b59245fad05c8cb487527a0eca4089db7442247fb0cf9d6885f8b85cbae0bbd5040c485639f0fd6897f056701fe8235312410273709d1af8b2f54e8dc768299b3488426d1a87552b7ff09520aa3cd1f90b684510a6c0b5f93e53795101abaec510822f43316aadfe2f9cf84d3d9ac181795013b02386a821bea5d41112e07958f1c85bc32b48f97a89ead6e70071095319619d215e3b67a4c8eaacffa288865fb24c8686791d8de5a927e7145aebd1061db38b259c4b51f4d9c1433776717e5753491140cf3977eb178dfc0d64a8a004ff80c79ff411f0e3e9ba42ab58bc0d02c691cfb88e385c561db9e41cd75834e2aaeb493ee3c419194015fc4d7de15cea6ac4b5feb13cf96e89beb0f7dd3b1eff142ca856b09c4a9d18b89ac8aca39455b23724f268f3f0d5c9f909b74b0cdd133e3c79928c4e1d5d2c6d203a5ecbb2085dfedbec0d2078c9e6322dd1578ebe7602533085928a153b09adc55f7bf88e928ccd1fe836d787a29e8e5859f6450d9678865d3096706caa0dbd444961e5666e8f589ca7af098e2439768f3f4f7b7014966b8fc7fcb479e87309101abbac4a40a8a0384b6682ae75c18804202f4005464ab65f4931b84a4d59ded8fc42f1641b96c1f1b6cfc0581dc9c242b0d2b92e2e0183e02690d3bfaadbd8a23be04dda9cf88cd4d502b7e96c08fcf68ced3a9fe1c8f1789880c987608a657bec19b72210e500027baa0039f1198690bb91f26d7ade84fc146feb500c53c5d5a9e87de98815d4d0dd56ebd195909f5e4610ccc48dd1e1dfeaf276316a80d1a476f3014ecefde04357acea0f4310eb1f245e74cbb534261fd041d295a6dc3ca1638474737408474d00e21e3e9d8e84bb8fa004d403d5b14dcfb7d3400d9b57eb123ab21b12a2c0dd962094d8039b632d6431843879af91cb0d45ca014019870356a87817a49cd8d8a94e51dff9d9082abb568ca655a9eeb0df0c1b7cecc93bd905ccde0615c97af991b4deb779e9512b15970472de6ba972153d78e216c29dd355914c1120f75d460c6e894e4995b1c0a3ade9a1f3fe95ae33116a5bb157b4fc99157399522e4f78f1b60eae3088727ad6d7ebbe40
c370269fc7fbf513b25dbd072d1049c1c3f0c5cb4de7d7679a50d23a518881e908dd73c5ee46cfbb4fd24bb6031ece8b1fa1996e234ecc4e2464ee7ccf28e3f9b9471f261cc6034f590d389d06bb5e3679ea25d00d3a411f648d337f9ea27387a0b09efe9d2dbc5656284c6832345a863af9a3b6c3a7a90907a40e361f17c5594e1b5369e53ef10570bb9e8c30e38c96788b7ed92ebe93e77347585ae567ef19f8cc5e6b34648de8a79e67a5b4e51f3222c8d6e6b694be1b7a2294d04bd9747e9c27ab90012089c4362e9b8dfbfd7302360851211ecb9c1b8c52dc23d10160a88b862442c0e871c153a5cc7b50f72a4a9437e114b6bfe01c3a5bd42bda1abae4088d99aaaa005b070e606ebcc73c075402b527f65f1f18bfd6f40e90c05c3e4b45a416559f0d91b2403d8cb2882b257976ddb2767cd7b2d7b7465e39f0a3ff977d973a1a37aaaf56b3a0510734f138c9fabcc2c92756b71dd6e9c9ca8f74353dba76fe94bc5e4794e43a361bfc882a895c718ef4e96ec24cf6b46f5d35f7c22b681ae4952cf41229dbac1b02386ee5c33a91730053f48202f090e0fc01fb99c1bd975cbfacc464702414aadcce59dbf76145cd418972021b6c622d07aa88e8f5e470988dbaf6a159726635ce8266eb163e79bdcb25d92425286492cd1234772393021697eb1f0cb6a38fa9859be8a6fc81643073949fe14aca7d612308f64cd3384416ed3033aba9571361a10ae030845e021e4d89617a4459397605c13a1b73fe74ca646e4135db5caa4a9fdcd27c35a88bb085deee56266401e0399c291f230f2411c3e0d7890a8bb3e074ad8af52ee6f588f028e6da37c82d7629f3f232fc3a205b8ac41d6a697b93f0f7dad836f92d7f424c7c93c5e1e4b77588a47e713366cf367d3ab829121e426b727b0383e1cc018907e4f942368ec928ea2a1b689f0a59b890c65744db0f292de7b85f3213abe26869faede3c8fd5893497f42e07ca8ff436a6e45c95844b9cb603c040f23ee87265b346300a9fd819d54e648c03a3291e2a20a17df95e1c6f1c2b19ecaefae12dc44557f077ffdfcea8eec8a029ec9dfcf65d7228889e83222150827296f9c0e2bcd81ee4e26cbece4ac7a7e5eccab09921f6e1226b172f4c67103343c527b4f96e8300b5dc22e39cd1ba13c7b07bccc", 0x1000}], 0x2, &(0x7f0000005340)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}], 0x20, 0x4001}}, {{&(0x7f0000005380)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f0000005780)=[{&(0x7f0000005400)="5d592ba9474f4e728ba6341615bb26ffea36273ba0992d25331dc350b0d02a5d", 0x20}, {&(0x7f0000005440)="08c04f19ee3ba1eb50729810c9ec7e185d03290af8c7f019d24a7e1fa8ec8c49b3934c79e1e8073f9f2c6a7641ad40641c4aa3631b83702440181d8c1e5580cbad2fbf79d92950c7cea314b5d7cbb0a0afb78087d756e6c15a994111443e2a8d59ec8d7077a5fc94a59fccc1a72397c490e17a7288ae4239f97bf9368f1db42ece082e70b70cd567602a4535c5d8e8a104d092ff5e05debe64ab8d4fd407175a6283b58bd4214d888bdd19a6c3bf93c517777d8ea60e974699f77a1b187c769e37c15240cd0d92e1a5e9d833e73324cb1b8c170fee5a02c9da341ae09a8a4e01f0fa448f502c0c3afe7e4e3bff9c720593e481052bf9829191f3b865ab", 0xfd}, {&(0x7f0000005540)="a17df022acee196f3a0ab61e7ada472a8b42f2dbe1262db834f7ae9cefd0cad0b9d696621103de3dda4dada3f3319751ea0e1dca38c4084da884a524b7f790c9664b92d745cbc1cb352fe7d968a6f430e1935ca471a261f6074387adf333b969e2a7158fb62617d20b0354e130578be1d01c1995ff6076b58e431b5384ab093f30e7e992b430edeee329b2935db775e2af4c2b49af0bb931b2106e01139ff28d069e90629f4a3ab07c39a7152138923cc4ac1827655e3d0e78ae9be24eae725a7ad2ee86ecb2942ec38a86149fa6c1b30e4be6613fa7bd3432b5de84b63dbad0b860821f51585b5b95df257c422fba8ececb38", 0xf3}, {&(0x7f0000005640)="91a2585f2ecffddadbd0cbf7b6359eaab6f865ff7521084bbf408ab3effcf557f88a4fd780a8ee9d7ea8f110bb3dde5d2fa328137b284168d196c9bf187e1b3e5ae836fadecd004c5d41983a5d8f55dff91e6263aba40fed1ecb69ff78b80a638e117b065a44711fa4fcf131ad4adf138db269a0535099724c176c0cb20e57a63fea1d8923fdcebda03f741d96fb7e06a015e1f9b95d01c50f258ad34a041524542d0701b5d1e9de8322126ec6057200d288d4c6c645cdc43fa245872567b9860c944abd4aec534bc6eb29511d8fcd61f2e2ad45704205e573023cf44577352eec8eaabd", 0xe4}, 
{&(0x7f0000005740)="53d48fbc3dbb9d6e140f1d5e77eba35c9fdfbea14a60e1253016c006232ff984598a1d1183570d99956c1c7f7205f18e91a0", 0x32}], 0x5, &(0x7f0000005800)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}], 0x40, 0x84}}, {{0x0, 0x0, &(0x7f0000006b40)=[{&(0x7f0000005840)="bedc24dba49e008f9578d3c321f1aafd5116ae65c2fda5f7", 0x18}, {&(0x7f0000005880)="f795ba464eb04b3d849e41b6e4c6ba8ae88331d88590d61ffe3420b91bd3f9265dd2542e77c4c555f685ac296e3bd7082ae0e28631e430c02093662b578c0af81bb862630639372b577bbe55a3be34a6438e4254edef392294c505bf189f00623199212f873d54a9d3", 0x69}, {&(0x7f0000005900)="1d03969f0e6786f444440614743c78dad492038d4e7d6de69266b4aa66b47ddb7f06b51e77ce53389b48dd0a7211f43014d8ca872dea603abe32971024817a62b3509d9cc13772295f5792aa14a3fb61d33490e994b6c369a6a913a9507985f2d371a4b3d20e9c9e0f27cdf729e5959f7edf01ccd17c5df07a3370f976c98367f93888f88461fcdfa9772de3eaff44b3", 0x90}, {&(0x7f00000059c0)="66444bd856da6222775540e315e384f358c142f834fa7ff6b54e34c10a6fa940b91e1a5cade9b32eaca4693691b65813ecb9989090e85ef0fa5ab48152ef1e4e87b2be50b3537ade3bde326d9460a2b51096c85d05a73a3c97cae5f7b973b086e988e70dfd91a9455943ef3e618a02b2ca0f1cc8d5a1b6649d38f8db7927045f11ca37b90d05297ff4f2307499c70fec51822858fcd89a17257ea15c0946f7d5959bef8f5d720c7e58e13b676b878ed39e29c55dc32ff91d28eb80796d0aab9a3936f0eac6d85544d90f2702", 0xcc}, {&(0x7f0000005ac0)="e38094f454c376337e8b863126328d4ec1", 0x11}, {&(0x7f0000005b00)="548b0a55c94ff39b5893602d102d271c195def40708417b7fe21c2b1167c1d5b424d5d63ad40f474476ad6107bd7850a2ef611b5268a8094882c13afb419570d397a2dbb46c7724c56ecbd149be0ce199524e1c33eed22bc26e738cbfe86a07c83a94fc483cb9ef9b411e868f112a8a4684e056a2943fddcd764b9730caca6510eacb6f278c0360c9cdb7f2d3b6e6cbabf928d67c8448d63a959348429109c644b4099660b5b22e52149b4fe0d8af2adbdebf136e99f88585ccc26d108070ae58c5f3510c66fa03df700c77a9d1823de447c4cab2f131de6fb800a87f005374bf4e40084a2f0c476119770a04aa9a0bde42f0bdf69b9158801d658be9f31fe01dae6292bdee540030da638426c386277a8efd4e7603b60e7cc6d6c2bb43e188856d890865094069119b271604dd8b39085a1a4f00dc2e4f0d61a9f66fa0f8e4973d4a0255f760406875b9eb7628628f3ab48a1fbb270ac07ecd4ccbfe06d5c2b090f5752bcb4b7a6241f1fc1c482e79d5b2d217ca76dcae3ac3ad5b5ce355d81fa42b028e17e84e1f1bae9acccceef9e85092baca8c6f4f8170c85bfe65989c968a945bc136cadc3f460b14870e3a92df5339e8961660fa43711670386c04842601165d072c8d1f5edbc6fdbe29ed067ea349a71a8905498f66ab04d6326ff30dbd626910572035b488683bd5200210020959b6521dbe89fef399cd0b215e05acaea588ad35be98a21c9e64410215a56fdb38141505d879c0d539d0dd80728f2d41bfb523f4177c75253fb40c9dd03754b86d1aeaf3c1580ff235e0c9984c89e575df6e9c91de9e6b5912c83e78e03e20cea081fb0e5a82b3814a95f4b86f67c7fb59e6a4aa50c059b4d01dcec078a7689f850c8d7d36bb6c7e7d0363de2de2aeab990cebd9ce2124e834b518c54408c770bf8d6b262130d0e04699ec623db2818b3c472731a124ae4d42021a0b4721eed496c83aa3629d4aa2d2bb0cf11602cd644f72254005fc3b51f7a59152f64b190657b8f1370a4a95aa6307c8889f0df13a0271231df9df0747698ecbccfe1bfc3693deed692d5acb57d0aaccdef646c40bafca7c044c00ae8a338a95ba4c29d45bc20422a5cb44656479bee4c1a07e7b633c447b0f19a7e9dd5ce3b26fbe66d6f21c62a665d2772475ddbec460129f7594c0a749e73b3271770c4a9b53a41adcd623d5fa2dfa5ebbd0d50cbb5e590dec93615e604f0e9b5d171571697aefe9f6b539dfea5ab8cd8720878b61a0490e14e1a7fa15dde65856839471e0ef17263cb8faa9e90a18cb92ceeea45611a9eb3611730574db7689a68acbbcc25ee178671baaf77e32022ff1fb094008ffce07c2e459c283861dd4df3caeff033b7aebc59f7c19f901fe30cfe8bd4488f73b05ef4418d5d0e975196634352fc4a782876e93f4dd0d826d7b4fdfa367e45459949680825515b8356ca0d66c57b90761c5aa0df445d0dcfe8f81c50a7bf69a4e
723aeab8c930bfc51a102c6237e8d1c82f11c50a46dd8058990e94d223262252cc14395b3cb9c69b970f8cb1a2dec6701498df4b22c4f8712a361b34176be95940c9826ec11bb8c6ac7b4a35161ed0f3c8e924a7765448b8cf6096f7e2f1ec0a1d3ba67a6dd3f1c73df2c6e4942931e7adb6720c82ede735e64a8c645e1a2d790c9c0c17874bb7988424a5871d1d465fcaa9d6a36e03cf5e2a0af120b07531b6a973c1024f91cef25be4d218d8008197369757e0d74fba417a435c4002cd211e19976eac67e3c7231328f49dc619dcfff276aec1740bf21a4609acde2192025a04129adb1c1eaeb71bb891d1bd633b8648693d58ccf88855801f83817958cf1a2f39e9701995d6d939d1896a9bceece557888293aa003b0a5ab473242ac077dfc96924363d64c76f6fe07823adf0243e5f02a8184895e72166eed0278e71c51d44203899b72711091fb46995d9c1b8032dac8b968176d6fd2c00f50919d7bf4f7f2c157922856de0a29cbd7aaa42b042d10f481910d3a678fbb3c075a8f43bd16d328729466a2919a76d46d70fb1be4855d2cfe211b53616545509b2426b76544c6e1b2513bd125aab4120629a3b551bbd6ebd90ecaa5a9c898c7c5d7a88598fac183f41f6a52602980dd7ce20f1e4574ac2820885720c92b5e20798f3e7b3b82603f9a4464f727bc0d0c569a070737ac12fbd82594e10deb906c1d1dabd3133492cb2ffdc43166d34bcce7294a4c3aadfceb603791b8417a9128eddf382f9e2c49b916864837554609ef9a2f8c2430f3e5d5e0133bfd232af3f93d007f259b8c4ef850d3e00db53c23d5b37d085d38ab742148e7a9425908ae4bee58c74002a3e186554c7313be11d48732cf1ebafee5134366c46b2ee06ff477ee090cca57c671a6a74d2e76f4d78786563547f74506a3c8da9ee3eeb534fb2a15e42e90b0f01d83190fdca7be281ae9a2fc1136f4f3d711591895c65d1cb424903547645624763e91b74ee71889f50610086bc6007462e24c23fdddb1c5d147235e85e6ae0e356ea19443d0a80c76b7924b64a377e620cb767b8059dac86b20185eed2372cee82220d2418407cb38b2f99e26c5467da910d9a4f968480a080fcd14fb5b2dd424e6b1df7304ea730adb054e7c072ec69636d157456adcb5e1d7d1b34fb1c5711d9387a1fe276edfb3101514343789211f23841f5c6565b7345cec480afa002c4a0c23e14417d5f72144f5aa4795957a2c0ac0f54f23b86a5907c9dfd79cb7957857a63d11e124df7676b1ce0c50af48f1f22a30aca08d62c58afe3e5199cb870a2d644d0ff480b07b9587b2ffafc38bb8d6ec307c4c797e376a7b4078052ee7b52ea4816f73d087e88ccf35604c916718f7cf9ef2b676d3c5fd609f4c28dcd39dd476b3c68f7827993fb71e79696b0c3b338fbce8a436ed5c11dbb903c9e83ec7d31cb14ca6048eda714abc143fde49b9c231ea78eb4d9f92fe89a124bebf08019e2f43f75a91c9e884f2dc3240cfdafd446f08ff3c304894836b44c9b78a8f06317a354f6d26eac23871d9bb865dd76cf6b81c830b12ff4cd12ec0e8486b1bed4f214950c238b0841b85369445a7fd17ab43b599869167200e242ed9e6c1df344391092da46ac15af337550307909d6b586be23a340c18d8b47731f3a7dcfd4041c60a3e62dcb1377808079c8a4440a952140e8f69fd2efc69ee4ef20253a4110e2360433b5e4e066a3a9afb76eb412b1bf20faf4fe2c4ffd696dcffb812030dc045fe15e68efd591c32a896912f7f9c77735aa17bc37351af23e5af2a59899054436c63a6cd50d7ff0decf0f7bcbf4f20ebb178aac3b8b26ba792206b6454874b797fd1907542a6a306f568243222ea4dc12d21a81f63100be278c2e14f05489ed9b3d9a748a6ff27cfcfe7419eab5a7cacfb8f2936920bdb965ffc2d5546f98dc9faa18bd74bb2456f54852ec5142bc6f31ab199cc40c0512f8192c219cce4c88ab6b984f99b43265da03608ad5815059bd7740a594b992ca78607c978ae31f2fa690aa914c028edc8ab687cc8fb21c5612270a31a110d24c012019cb68f4ccc33592565611e73f21335e4887d80afd2037284fca6b756cf491136b20b6399d15904f1d42f583ab1a5a22caf0003f537f1f19ea3485387585b8441621939ad00c5834e212b3f09d961239573b512607d84bfb9dd2504bb524085f21edf72edf3a25a244832ab4481a007deb2d27c4c57ea6b3647643a400742521d44f8cff6610087bc99cfb8a2c72a5ce12eee8a38d75bc1570f95a5b8dbe1d298d07f667e762ef8a194b811035597aa9eda760f082a7741e6ee2b6a493be793124537c6b25b0aea754362b5c5b32d62c2619c161053d089f3e9cc8e1fae6a2658c198b389903bea459af41c830a05620d65a0e5a4e90f7e751da9609157adb1f3d497b64a420eb2e4a9549e2d935b5557a899cd3a9081cde0b7cd812d9720ed1ba568b1c440dc3016eebe01aa8d244d18c75076bc
04b49439fdec070db2e3984908ad4145bb422f537b5480cb905ff70e86a3c28cc20f55ec4b4f28a8f7285325e918c62183ddc8fb172b2b6ff5ed3bfcdd600a8ee00352aa060d3282db5da0c0f1b5df3d91550051c445be5772afd9e44dc4944769bba103800d2168c5adda863bb77fdf7b44bf67255f6ca4c5c8d1ca558fcf876a27a6a3654d6b7cabe7b715022ffe5a82aa49d25b31d10d50314fb84567221374892b9394a673c1a82bf0441167e4b5e906aa303c90ccb6ee7d5268f66cf330368bcb147b2d00f71a6a019063ab9a726ee4b0094c685ba601ab33dfb8e9c37c3e5dc04bb1ea15f05c14ed1771757e95c4473b51da5210a78a3756f5c9fe7db65f18503a703a572d52142c1691758169b76ebe3d4dbd15f4d55d4b141203ce06a6deb3d26e8c62a58ca26093c415f08fc01c34ba4d6ac21abc2c0cdca269c89924f562c4a1ce81fab80623ee0841f9dbe7e4f426d95f48e3277d8950c0f4ab172e82f3a39a18cd16b2f6f03bfd8671950dec0ac22ebf16fbc9e8434e3c4e17fc643bfd9c8d2a1abfe44bbaf0a9c77dc229c54cefd51092ed1db2c669e77f7c5545ede091369671d84fa34e94b0155d8313fc46c5634d493aeb050d1e329c9fbc11ab5eb9c5e148a797c11622e75cea695f2f2c5fcc8d31558ec409dc0772b3c5eaafcb9f34b7b2978e95be78177f9c1f8629d204ae66b0c81dbfd64fb38885d1664e3b241e6872be11b190178d7c880df2495deb2f8dbe6233b52191b776196b7835e3dbe6ae4f6ca2865020acc84e111558c25c270cb9f500e1fa092d925080108dfceb88cf97593baa48b244542e0875550c5082a2790f1f061eea3e7ffefd907b1df212e16cfac7bfc692fd023a675a5dd66ff71ae35d75a90577dfc01333a562d9561257680e231c0e2c66026f9ada2c3615c8b89910ac20489850b3555dc57b7052fc07936d4bbe73b71ec7bd0daa453fa5d6f4eee48729879c58455dc73fb5ffc6d385b50ee7c68c49d5b17b1b00d013831932860e6f688b6cb1b6e4559105d291ad370adc3d45da0c0c05cd3570348127d005c402c8de4d04df399210a8b2bc67ab212758f40d03e8b6983d6fd7117f77b4ea1e603c6319868380adbf98bd5dd9182f84d8cc3545853c52e9874f293f304342545018d566090970458c6fd6a473473cfc160aedf86b7eab2815c74bdfd63e98762191919f11b0c20b8d21c5907d8bdad0cc234b1db060bfe8b56b4eb96baf28d6425712d4d895cc08e0450e3044fc5abf7000d63dca8ccf4fd34fd91d27c0d572253b30f9b1962e9b94903cf1d8e44ab35216d00e50da4a8f3d42b9a14ab245ebbaead50f34acc9dfeb909ee8d9bda30aa1db78658522cadb6ab34a772e86ea807ef72a14c897edaee057fe6ca0ad34f26b592a5934332e790d1adef4a7a23f25288f03afc18c885b122351c5b7bd42de14a51772fe193dbda60dee7820b4f45c135e022b368556544e1b36cb4459fd92b622538501e36eea0ceec51d2a664985139cd5124df8f018d4371bc873e7f5d9e3e5d74e7e89a58fccb642da52d9f7051ad6e13cacf4047d8f630270fd3eb05788d221a92d8e39d0fdd77eb0c00a493c038145dcd527792a61eb4f299f19ff9578c9a83381a1e4de1e39076a5851b650c051fadf6b5cdd4f5f614bc5a5f6572eadcf83e23d05c43711e99d8d747a5d59d25835bb4dcfd9471fb860abb42b1e3fb27ab1670c36e6313a49660ee0f4a100804be0ef444536fe80cc8ca08985eef8a49b05577bcb078888", 0x1000}, {&(0x7f0000006b00)="355847449e8ca1c2a9e8898c3e898a31f1ae35ad", 0x14}], 0x7, &(0x7f0000006cc0)=[@rights={{0x38, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r1, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r4}}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, r2, r5]}}], 0x78}}, {{&(0x7f0000006d40)=@abs={0x1, 0x0, 0x4e22}, 0x6e, &(0x7f0000008040)=[{&(0x7f0000006dc0)="0e36e4cf4e414bd7783e1ad6829fb9ace250e78d8f19cc8301642d8bd1036c69f08dc00bde392bbc4befee3ec3bd09a688d20928eb88decb0c2eef59dde5a180210c59fd595b58e74f9beb1a08d8bfa61e27614cb42213dbfb106f1de50bd7414a90202fc33c38d9b0a0d4598c6fce497565c805dfd80fa5784755b79df6f89c8fbc7f36e6f163478c7fa2945f3d30e46e5fc85d338039", 0x97}, 
{&(0x7f0000006e80)="19d213ef10cf7e9fa4a1a2006b739a7957fce4932402a0e6123f5da2884242e6f63b50445ded73228b5731bd10c17f3307c71ad03f9617ba0b37ba2439afbd5ca9cb4f03a71736bc2c2eadddba3c0ac005cfcb031452f828b190787e0269fa264449ea369be0564d523d5eb81072c00c20f42f686cf6fd49823084bafca0521fa02899e05cec27bdc0ea237491b36afe8caad2d5045bac8dbe200533d89bc5d1071effd4fe442f2187d555ac8096d663fd6ac1123881eabb516483a3d72d75a9052a26aa870944a6d60540679a98f8ebb4d0428997fb8cc44e35b630130e090bdb2a10701437d6ef86771d85e90c4d5933697d04f54c90177a04ad45b13ae26404e9f60a6694e11c208cfd34e69f9027dbe16dc9eda0892430765d49fd0a65a37b7707b040b967df04ecf5159d6800a4d8d749080c02be772eabe9ebbf5ecf286d132ab406875158418df9ea37152fc0e3b758a075d0d3511f2bafd4ee9e7107c5395c1b4fc4ab442885890b75747f21415ed5cf547a8e29adc7c1bfd7f8754ea27d8ae5fe66604f6a587fb15c5436331c35bbcf03b30afe8d993b06a735c7d3b23ed58f9da2508b768f6aabf446218bc32074ca8241f379a669786ef2435e70d2791113aa47b7ce8a9fa5ce7b506c907f88cadf566a592b33661e0ceb9cfc267785d4aff387884cd02d6ee2ed5efdf84a9fe819179c0ae0187100fc51f53a3cb2114f1f851758ea14d360062c00d6ce7f0dc1b5b184bc4269d4af1110e2af0045c1ec450b3830a954f4b39622adab39fc07e7d6fab61f30e5e3c505ba15af565b75cd42a59cbf3ead159c4c334bde71bccd515a9875e719b6750d323e9fd44892f7a66870f47077f3c22b1b478918c804aacc811eefabbe838628124eb85b755c92a232bdc79d1981df85d281562b7d16bd4dc97b15206c87247cb86119b0dffe6b3863e7396f372c3b76f542b2f5cda37d41e9e296ca021cee0d6c14ee6c65d5ae560faad98e46b7042289e0b24f7f6501e17d09a89039fe0e5aa26fce6f0db8196926cc7e271a1557daafbf559a89d0495bd90a9f15466cd560467db32f04174974e65e47bd91b56d1d8193285838a7742388a06fd3aa4e98773734a2a9dcf9a5b849bef52a9248a93c49a909cd64d1874b844097fc7edf0b25c6a42cb986640d78fb0dd1a6a5a80369ae11076fe2bfd97ac38f35b339435b5a4d8716b0e35bb2d8b7ef7a9207a6b4aaf4d2cec6af2a00c19308fa18a1ee510b2926ac2c93400c5b0d3a85ceafaa799bc57a4e5d334ca69baf347c3fcbc46bcb4530507e999141715451cd6e62fa800fb3cd90d99496560cae34ba8b8f62dc0f6594b4b9ba06009990ad8ddaf387a5fa83e1930faec4aa7eab8a38023c0d79c4f9815601f2fcefe7dfe36af7fb917dc0b6f6a5018c4274e86cd9d67cb5b3515d82e487294dc7a5d8f87b5b2a8cd5d781558d9e3ef3248b13f62851c064fb2edc0fc4f1992bb1b888cdcf583e5d49f467f388a0332965c3840eb318215ef967b627e6a62f44eaa5363e0cc8e3ccab4ded1325a22503264c268f9817b1bf16a8ffbba34b4422f30ef8ffdbe820c80da362874d5ee42ff01c67893babbdfbdc55c5bad682ce0b701745cccb38a46d95a1e82dd6d1e6299b28e255c459b6b4936f7e55db29a4b9a91ae8404cf4baede161b8fe900fc23b66e29c0aca38e625d708b24d52a1d96c8c3cbf7da2a4317e518ef857eead7dbe65d381b77f584c5e79afb7483c3c096c4167dd3b732751841b934b4650add418af4ab963f504d77f1f0e3622cfa78400f77bd7d0a5490ea9f12a47a034da54cccb098fba8f465d467be3449543f90414a17b9abdcfb69878691db7bdc21471fe2c923591765f923c6ca396d6c1f8adeb42ceea508ddb3237b5d317757d8130ff8e457f40dd220f50a20c946bc654a63c4c9a3384d655a9672c332a92e3fc83eb961697996eae926e6184da962af73aded4a865267e663d06a4189f6fc3a4e69f92bb9b8563c7b78378bb678c5a8f584a361323ef276bff5eb7fd1e8f5d4a0b649223de7776002dfd493caf3f1441f31eee3e998b845c408dd981fc851c1950046c4dcfc877bb451fd05cb71e031b40becac0e0bb3d4e08f6d71475329699705512dc1612b7b2f316efe6d87c97cf466559219f0a306366d1ff2fc103b3a83b1b57042a0b8a30d87185018fdc54ee955bcc64993d486939e5ec7567252953a8c8be1f2724f6d2f3a5f12d173914c3076dee147219fb6e5b23ba2d5a8c7cc384f05f2b3ac4270a91e6881256fbfca6cef7a5673efa9b08aee528236221090548cbc860915bd697baa4960943ee847c2735a31ee466f1ac0cdc62c87863365d219fa258b7193ea0c5366d5b408516f2e8d145c9f9a84e469463d066ce8c2e7f927f965da3bb06d82a1507b6b9c1dba26cfe738b23b95f96c93aa0abc4fc9ef970e33a56c8d53642571ce29f3cd207eb24c11bb2199b001b29d
bc0b88a8df0e945dce6ac16a415f4039c66566a505b3b521970fb1d630672a0d477a7e5698b5a0bad47f52b694217893ed9b27c14a48d15c7571681cdafbbaf29ff7d587e89d1acfa2e4d1c199694c8b395a4bda36232bfe92f9b5a9a4c48560e383d7367e6e8dbb6b8a778671dd2ed7a32f050293401c37b6c37fbe00d35b48311cfb38ad5517299003521cd41f94f29c86cb9a51b0e19e947631e61126e0f23c14ec70502f57b54c7d9b05ac683e97880f99e395be449646e8ca945f571d77a0a84d0bd0646f27716c40ad7ca5e97377065d195e1d5bcdf1acae27a11af3f574931499dbe5f8e03150a249167f2042abf7b41eff4d9d7448ca07c473c7448fc35820fff76ed8bb77db6387c63b89fa1962845e51d49347bf0099c3d0f1552bb9a84e6c89a66a0e53d4e729364336fe91026e7f9aa7e78f9ece080b0c683a462c0d537982df591811527def889f5db7f86b69c5c99c98f76bb0f095a689f8b9fc70ad6543dd288d2c646ad124d3f5e46f510b90b372c4d35d757b1a8cb84a4eca9f3f403d26f1c1a8097428510e6a63984a0ab07786bad3ffd9a726a367ade3b9f2f92e2f9c4d6d8096e88143305140151d987dbc80963696839f2abc821a7a4a6da861bfcdfd5a990a951d05086575f8d6080cd63d53d97de54bfa5df7843bf5bd287db9a29a1843fab25b911cb54d5608d20cd19e5943222fa71163e73ce67f237de784c1578799d44babbe701abb557750725f25fbfd080775e86c3f71b700865b57465b0c9cbd993c4a316392b5a725346c16a066513706007d2292d6e1095d52385f21a62b24fb4b6b7f21d9f8702fda83e8352b7ac332ec2df14b2d09c3811448d46dea57523d2b31f4e6c3bf1f6325a32908fecf8cde4aab484ed13d6eeda7a456542d9942b03c1f4c054c37d9e26089990e73773310165f241bf10a6b7dc1f34d92565dc8c1ab408feff2d73e05dd3f415b8ad06736409d45d8a134d3ec5406dc3a4c01b11e1a1107eb59cd995f604ba1cc05b42e08b895f060409682f40eb561fd66e3ecc0021009f2ea20c2c1478a2e9b313e8e85d0c372821ee1623279222c395395c0bd255379928ec4f609171395d967ae706468aaf2cbe7bb53928320e6ce4d6d5958a4e28e55a2e8cd5d9fa974a2190e395bc0dd6535590758bbbf00421f8097b1e94f26fc52e582d8cd6c4185ec95eef53ccfbf22b3a1ad50911fc1e2c0e0934626bab2598b35467c47ef662d67a5a957fbf0fc334a127d576dcfbf498307529473f770ee7f4d724fcd5d324c4dd3bc675acb3877865b78353d5bf358e21694c9a10faeee71c3f965d5439b8fe699b20eb96741da88a9ff57eb7b210c79d8a744d21765613bbd0095ef7d0789a11d99873c38a6c9422a246cd89fad72938dbdbda23119945ad3e8e09db4863c1cd156cd93f6a587b7d6a413cbef77166c08b1c577072403c3c223fefab025b2c26ffa296fe7e1e5941cc9329e7ac6e19323f0a905716da36841e0916baf695e3aa7cb52a55302ea1281a895e5cb494c1eba6cf8fba751fa1a72bf102ab890aeab62a2c7590aa30e294964b656b4e112dd743fb330e87e2f3d6f0724a1137ef7eebdf8aabc1f26f1cb3235ccc7af35b129f453429446325fc60fb9929bd9e65fbfe44e58a4e12a13c55056cf9205ac63e26908b7a2c63a5a518ecba81fb14428846cd9d7b5cef0fa0b8b878a7a1e17f96a4dfbff29eb5c389e37b9528d8e943a0b3965758f7d5fb442791bee4d83d35ba74a6049b34b84ac158a0406cd5228abe61ab0ddc3f1553595553f66a2810218826a9a1868d486957a9f14b9f0fb30c2a14c7ff806044cfca508a61053019c6ffed421083370efdd3f61b628a2da1c82f7475209619b61ddf40ed4d4a0110156c52e849b54c55cf5d4335469ed7ff9d48d5d48f2d9cf29d4190be4f1994ce4c9f241336fecac84459cfe3f7dcd7a5aba7a7be4942bdbdb4a16f8c2a9b7d20a3d455c17515faead7ca4b40f935c7123d7da283dbf0371ab67b8dd5875519c2bf5e0579637a7cbdd8d094839143be38b98f9d16e458761e8fe0bfedf6cc7d1a4151927a3009278287c2f41f7df79e04b7faf13a5e7f0262ea64b1b9f27123cd827416dcdd97d006bdef1c4b8e94b8fc0112f63a1cb04fbb30af79dbf3162c0bb1ac28ae461dd72dfd7099ebb14aa3e10e8a24479438fbabf0f3f79f6f0251207aaab5891421f7b7578b9b5ae859d5a8b3d8d986b5d30da4a320763ce9eb8404ca000a4f4f43a7b3fbca1dcef7299265d1e281bec2881fa5d14dc78571059549665ed4f12cffa0dc2eda64d0adfb1cf0fb729ec6a9347ce8218626620cde8945bedee6b24fa05998f438c079a3e49113044d500aebca46288b00e4d59f56ea5cd328247212956556bc5ea77fd733de23ac768931eef324ada06c67dfe8fd92cf6690d39949f2df581265d0323807036d992c311c26f2fb969f3be650ecf8eafa16573f2563a2e2285f4a604ae2b0
0f89c980b8fb662baa724213f48945afee7e3b27a382b54814f460489c335cef2b6e94f87ac529a20de6cdb5291293522b2378b369e43a33edbf34d46f8cc748ec94c12d400de20cf7f2fc7e1d5a3ec1960a63287ede22ea8b5866fbc0b6a9bf0e0fc32cac172c8e59fb0d501e13391f39b4eef88c234550d088fd7a3f132581b9a2737b25d4728d4370a77f64c5d5de14e2efe9b5af4ae1437abc17a288ec3a7f5274325307486956767734260dc3a5f4a77f563699bf38201685cf352b73765687a4a8fdb3a0ae8703974c1cf7986c012ac3d089c030b9bb69066f4f5fbcbfa23e760e4f1cdb07af94fa82f2fbcdb527a741eeeea63220dcd360ea7ce086b30a2778b9da1a159abd99a4ec1fe7a0fc72cfe8cff91c75e3e98243d69686cc03e8a80ed93d87da2b67a3d5203592ffda096cc02672abcf502595a9c1d5a6093257e31d69edc8633d96d824e58e61fdea156e08d70a1da4b212b4e218caaceff6be971d59629a8e71a5bc08dfd7b4cdd5cbc0bcecdaf23503bc576ba73ccf5edd01483e133f215b41fbc17bf419ae1676a2344c280439f56a449bfb43d94c1eaa87c7bac96f6b835886e69a32548b74703f4a527f82a34af01c524195021d37550bc68a0014f822dfddf1634d55a73109bfbce6799a29b3408bb6ddfc1dcaf2b45cfd9d629732f5ada1ddf02cd15c3d4505611c5dc09ebc35a0a3ccbafb96df5fc82bf591f9da70784968d445c01b367010dd509d2cecb07bf179f74b6cf61a7e201e457200e80f", 0x1000}, {&(0x7f0000007e80)="eeaab57f845ed24e38e1738779b9847a09a5526d9a0348a299f142999dcae6c4d19012c19ff42c6d3a5d8f516b4665f522843071110d74dfeec1d371494f2554c7d7a1105bde75f9bfc1db17e3a2660604a41334a3f8445fb836b2039542ea890d95b9d022890079fba8f77a5549002f95ea80e0f7020b64618c35e646ed5c401a189e180db5b5eaef", 0x89}, {&(0x7f0000007f40)}, {&(0x7f0000007f80)="ece25ae9fd4a82800376ff97ae32a4eb2aa35fa93c3d7009f9be16235a48f368da6ba2607a28eaac33370b4d0db6f6b0323641c1f2a806974ce65311e21438275ef492b4bd96bd977a937064080713ab17d8f2c8be3bd40647822b2bb812908ece370a7134918cf8b4864d51cc9b387e89c5144304631ca9e3efce3231a1cb0496f83e0a723e269c495d87fd4f8f294e7efcfca12023c2aa8991879775b97a9e47af4d709875a91ba5271578a5ec28e00a", 0xb1}], 0x5, 0x0, 0x0, 0x4}}, {{0x0, 0x0, &(0x7f0000008140)=[{&(0x7f00000080c0)="32d2682b5e5e6d13438683d3fd48b6424a463b273a37120fac6504852be18154bad8934be23bc302796c1ee3e8705bb7eb212415234c7ecb0c71143c6caada9958a23f59e5c58d0ecc4cd34452d43602facab9cc", 0x54}], 0x1}}, {{0x0, 0x0, &(0x7f0000008380)=[{&(0x7f0000008180)="ccb2eefb57d1df2e07193eb8480dfe377e9b44b4e4ce855176f1cb1ef85c3c454377fb92ac760f8ee09d30cd521555be943b9a1fc190491d292ed608eaf91614aff82466ecaddb10ff14469a904796244e98ef82f21b8cb1a23e5149de2e2177040d26ef5c85e3de35b9bf1a4746870127de43a8f4c3ca9a48b74488b9a61d26ba842fcd1b39f892807febee9f1b8a6108ed02e2728d6f68a70885876c4253022820dda1d7564e39864f8da322ceafd0eee3e4bcc113e1dde2cce692be644685c270c6c54fe179a8d115574ffef6c7c47f4c3b7ea9235da4dbb72fe7a77b7239dca21de044a946955e8b40ec60d60fddf32a", 0xf2}, {&(0x7f0000008280)="16faf5206b83c50c942069b7d96801cf5edfeeb2b01774fb72a254870f9c7709ec8bb051901ab8b4f105798dc79b744ace4e654879fc2f808592228b30d43cacc514279c95531bbd8bbce713a3ec87021c7a4668abaed97fe991c54654e44f094b56174d701e4059a76a", 0x6a}, {&(0x7f0000008300)="00c9ed957c06da8dea8581ea7b2e2efbbd59259d47d5873b500b009a5769166006a19d86788731054fbe1b2a0b262ece2f47a50e01b7ce55853270cb474373997c", 0x41}], 0x3, &(0x7f00000084c0)=[@rights={{0x24, 0x1, 0x1, [r6, r1, r1, r7, r8]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, r9}}}, @rights={{0x14, 0x1, 0x1, [r10]}}, @rights={{0x18, 0x1, 0x1, [r11, r0]}}, @rights={{0x14, 0x1, 0x1, [r12]}}], 0x90, 0x4000000}}], 0x6, 0x0) accept$alg(r2, 0x0, 0x0) [ 1981.376952][T28401] 8021q: adding VLAN 0 to HW filter on device bond364 01:55:31 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(0xffffffffffffffff, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1981.403654][T28403] workqueue: Failed to create a rescuer kthread for wq "bond532": -EINTR 01:55:31 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r5, 0x0) (async) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) r7 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000940), 0xffffffffffffffff) (async) ioctl$sock_SIOCGIFINDEX_80211(r7, 0x8933, &(0x7f0000000000)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_REGISTER_BEACONS(r7, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000500)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16=r8, @ANYBLOB="0700000000000000000002000000080002002e02f00008000300", @ANYRES32=r9], 0x24}}, 0x0) sendmsg$NL80211_CMD_SET_MCAST_RATE(r6, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x48000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x1c, r8, 0x8, 0x70bd29, 0x25dfdbfc, {{}, {@void, @void}}, [@NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xa}]}, 0x1c}, 0x1, 0x0, 0x0, 0x8800}, 0x2400c804) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1981.605801][T28423] workqueue: Failed to create a rescuer kthread for wq "bond974": -EINTR 01:55:31 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 
&(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xd203, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:31 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x10e, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1981.874480][T28429] workqueue: Failed to create a rescuer kthread for wq "bond922": -EINTR [ 1982.134856][T28434] bond1026: entered promiscuous mode [ 1982.164301][T28434] 8021q: adding VLAN 0 to HW filter on device bond1026 [ 1982.259993][T28438] bond1026: (slave bridge990): making interface the new active one [ 1982.268132][T28438] bridge990: entered promiscuous mode [ 1982.278812][T28438] bond1026: (slave bridge990): Enslaving as an active interface with an up link 01:55:31 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1a03, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1982.326363][T28444] bond365 (uninitialized): Released all slaves 01:55:32 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(0xffffffffffffffff, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, 
@ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:32 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) (async) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) r5 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r5, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r5, 0x0) (async) listen(r5, 0x0) accept4(r5, 0x0, 0x0, 0x0) (async) r6 = accept4(r5, 0x0, 0x0, 0x0) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) connect$unix(r6, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r6, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) r7 = syz_init_net_socket$nl_rdma(0x10, 0x3, 0x10) r8 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000940), 0xffffffffffffffff) ioctl$sock_SIOCGIFINDEX_80211(r7, 0x8933, &(0x7f0000000000)={'wlan1\x00'}) (async) ioctl$sock_SIOCGIFINDEX_80211(r7, 0x8933, &(0x7f0000000000)={'wlan1\x00', 0x0}) sendmsg$NL80211_CMD_REGISTER_BEACONS(r7, &(0x7f0000000100)={0x0, 0x0, &(0x7f00000000c0)={&(0x7f0000000500)=ANY=[@ANYBLOB='$\x00\x00\x00', @ANYRES16=r8, @ANYBLOB="0700000000000000000002000000080002002e02f00008000300", @ANYRES32=r9], 0x24}}, 0x0) sendmsg$NL80211_CMD_SET_MCAST_RATE(r6, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x48000000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)={0x1c, r8, 0x8, 0x70bd29, 0x25dfdbfc, {{}, {@void, @void}}, [@NL80211_ATTR_MCAST_RATE={0x8, 0x6b, 0xa}]}, 0x1c}, 0x1, 0x0, 0x0, 0x8800}, 0x2400c804) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1982.444510][T28449] workqueue: Failed to create a rescuer kthread for wq "bond532": -EINTR [ 1982.546943][T28458] bond974: entered promiscuous mode [ 1982.563128][T28458] 8021q: adding VLAN 0 to HW filter on device bond974 [ 1982.698342][T28460] bond974: (slave bridge932): making interface the new active one [ 1982.728288][T28460] bridge932: entered promiscuous mode [ 1982.738854][T28460] bond974: (slave bridge932): Enslaving as an active interface with an up link 01:55:32 executing program 
5: bpf$MAP_GET_NEXT_KEY(0x4, &(0x7f0000004000)={0x1, &(0x7f0000003e80)="db3655c7decac14b9ef20513f0f48b1838d81c53b6c2e9c4399bf698dc473ed4932d6613881a4e8562aa809ff93ee83d0edf8cfe2fbad1505874395d481e046eb6a72d", &(0x7f0000003f00)=""/220}, 0x20) (async, rerun: 64) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (rerun: 64) write$binfmt_script(0xffffffffffffffff, &(0x7f0000004040)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}, {}, {0x20, '!\x9e-\x1c'}], 0xa, "b38b8319a2197289e69c0e6810b151343a89de65f352c6ae8caafe008b6477dd4f981de745e44805a5bc03e1d82fc9d38f9e1ecf21fe08a14d4756e06b58c574324d02d7ab360c20f02d252278a6bed5899d3dcc856273a6ba5f32c0ff858ccb6a0fb9d46cd308d701f646973ea1a6ddd3d06dc6107cb27c81b48e6b2dc80d61048173411c970ed04e51ae3c0eed596b1571a03188776d50ef739471d03e64ddf4d9245a23d787dd3c095e80abf5e29189c750943b37f64fbe922771f910c4939bcebb10b83afa0a9a22fec1296570cffe60"}, 0x101) (async) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="00000000aa7809d8560000"], 0xb) recvmmsg(r0, &(0x7f0000003c80)=[{{&(0x7f0000000000)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @initdev}}}, 0x80, &(0x7f00000002c0)=[{&(0x7f0000000100)=""/145, 0x91}, {&(0x7f00000001c0)=""/217, 0xd9}], 0x2}, 0x1}, {{&(0x7f0000000300)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @local}}}, 0x80, &(0x7f0000000680)=[{&(0x7f0000004180)=""/171, 0xab}, {&(0x7f0000000440)=""/243, 0xf3}, {&(0x7f0000000540)=""/4, 0x4}, {&(0x7f0000000580)=""/218, 0xda}], 0x4, &(0x7f00000006c0)=""/148, 0x94}, 0x4}, {{&(0x7f0000000780)=@sco, 0x80, &(0x7f0000000b40)=[{&(0x7f0000000800)=""/129, 0x81}, {&(0x7f00000008c0)=""/237, 0xed}, {&(0x7f00000009c0)=""/242, 0xf2}, {&(0x7f0000000ac0)=""/38, 0x26}, {&(0x7f0000003b80)=""/60, 0x3c}], 0x5, &(0x7f0000000bc0)=""/242, 0xf2}, 0x400}, {{0x0, 0x0, &(0x7f0000001f80)=[{&(0x7f0000000cc0)=""/66, 0x42}, {&(0x7f0000000d40)=""/249, 0xf9}, {&(0x7f0000000e40)=""/23, 0x17}, {&(0x7f0000000e80)=""/4096, 0x1000}, {&(0x7f0000001e80)=""/243, 0xf3}], 0x5, &(0x7f0000002000)=""/174, 0xae}, 0x10000}, {{&(0x7f00000020c0)=@rc, 0x80, &(0x7f00000036c0)=[{&(0x7f0000002140)=""/4096, 0x1000}, {&(0x7f0000003140)=""/132, 0x84}, {&(0x7f0000003200)=""/55, 0x37}, {&(0x7f0000003240)=""/163, 0xa3}, {&(0x7f0000003300)=""/222, 0xde}, {&(0x7f0000003400)=""/167, 0xa7}, {&(0x7f00000034c0)=""/177, 0xb1}, {&(0x7f0000003580)=""/85, 0x55}, {&(0x7f0000003600)=""/178, 0xb2}], 0x9}, 0x800}, {{&(0x7f0000003780)=@nfc_llcp, 0x80, &(0x7f00000038c0)=[{&(0x7f0000003800)=""/17, 0x11}, {&(0x7f0000003840)=""/102, 0x66}], 0x2}, 0x80}, {{&(0x7f0000003900)=@ll={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @dev}, 0x80, &(0x7f0000003b40)=[{&(0x7f0000003980)=""/243, 0xf3}, {&(0x7f0000003a80)=""/30, 0x1e}, {&(0x7f0000003ac0)=""/71, 0x47}], 0x3, &(0x7f0000008700)=""/233, 0xe9}, 0x8001}], 0x7, 0x40002022, &(0x7f0000003e40)={0x0, 0x989680}) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r1, 0x0) (async, rerun: 32) r2 = accept4(r1, 0x0, 0x0, 0x0) (rerun: 32) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async, rerun: 32) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async, rerun: 32) r3 = getgid() sendmsg$unix(0xffffffffffffffff, &(0x7f0000000580)={&(0x7f0000000100)=@abs={0x1, 0x0, 0x4e24}, 0x6e, 
&(0x7f0000000400)=[{&(0x7f0000000180)="94942c3d1e007dfb8404de29a8697799b1f5d6823a70813d4cc3415c6f862e8ceaac7242aef16f9f7c571f15aaacea204d20b49c43182fe1dd3de88c4a06101fc1f8d6139579492cca024fe7db0bd605ad17f17bfaab7d62fb0b847e05f9c41fbfaf79a513efae1ba322990f1327d42eabce0b83ee4fb2b875a3c4f9a1b2", 0x7e}, {&(0x7f0000000200)="993ccb04b5af9377cad757d9dbbe8345526644635ab0ecc50c5c9b41303e1e1f5b1f6161ff3f0a61f3f51dcf5eab537b55b5db80ddea43032815b7908ef405941077ae8e58627fe7265438edb56ef1b6918735c74b3b8fb318d24c30d06cd07d15f385dfd52cd11a49d23837a38ef8284140bcc827accc91e3fb964378ab5da48352949a0f4b27797b96b083028f2f6bdb579e6c1ea1809c644b8e841bb7bc0eb312d29e9fea73a71744649b830f244576a3b1b8f50150c6379a7ada43987439be4e1258efbf5d325ee5f0ad6c9d909bd73a187d299cd9d782beb7a8b2524cf2b61d2dba7e4acf6764b73c9a034907cdd5b7f547", 0xf4}, {&(0x7f0000000300)="130127c0749a951379b88d7ac86bd00a069d3e5793db16848cac09380ca3c6045e088493f74bbdd96015c04cc03eae1802359cf0a739df19bbbc910c3256b1724713e6e5c4be6c2fd26afc35a60e33dc091785fd017c569eea7264d1416c4ee26bc35c2a3ee4c8f285c9da4f7d78ed6613140dfff54f048b51827b8380edffcbbc154571185532f83a58dcf55a3657ebb73d8a261228568bf32c1e5ed7a414a2f8b30a24d5b952ae26d33311c4d23fa6db921a7464444692273f8476e09803bb860b51baeabc34a7828152bba3533e16df0300294a425b07d1a38b122b7ca71b5dfb620963", 0xe5}, {&(0x7f0000000080)="24c7145919fb2421ddb6ab6620204c0441c838579e3bbb9693a8c127c2c88f33b33f1766e6b65d233d3216d16c5ebbe3342d", 0x32}], 0x4, &(0x7f00000004c0)=[@rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x30, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r3}}}], 0xa8, 0x40}, 0x20008840) getsockopt$sock_cred(r1, 0x1, 0x11, &(0x7f0000006c40)={0x0, 0x0, 0x0}, &(0x7f0000006c80)=0xc) (async, rerun: 64) r5 = socket$nl_route(0x10, 0x3, 0x0) (rerun: 64) sendmsg$nl_route_sched(r5, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) (async, rerun: 64) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r5, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) (async, rerun: 64) r6 = epoll_create1(0x0) r7 = socket$can_raw(0x1d, 0x3, 0x1) (async, rerun: 32) r8 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) (async, rerun: 32) getsockopt$sock_cred(r1, 0x1, 0x11, &(0x7f00000083c0)={0x0, 0x0}, &(0x7f0000008400)=0xc) (async) r10 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r10, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r10, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async, rerun: 32) r11 = socket$nl_route(0x10, 0x3, 0x0) (rerun: 32) sendmsg$nl_route_sched(r11, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) (async) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r11, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) r12 = accept4$inet(r0, &(0x7f0000008440), &(0x7f0000008480)=0x10, 0x100000) sendmmsg$unix(0xffffffffffffffff, &(0x7f0000008580)=[{{&(0x7f0000004240)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000005300)=[{&(0x7f00000042c0)="0bc0", 0x2}, 
{&(0x7f0000004300)="adf4e18f1d9b8ab32a273f0aaa8b3e31b9681a893b97a134a07ef6f9b91dba0bb7ccec423828d8d1cfffcee97f775de89a9fa4387637ff8fd0d6f7f32d68eecb3c982de2fd17320d29ceda7988a293074f7021ec1d3e88285d314f9721d44af57b474ae56abc732c9812e478d357af5eef0b08cf1fde50185e220dffe3857ea3a46f0f78ccb4d1f7d2c7956f9ec30c415694119ff86f4db5692a5074ddb6daef178d7ce5e01f611ba54ab000a007464766fa4e3845ff3ed9f8a279cef6ae088c5da94183e93cd4efe8d4e21cddf5b85fe76bd3657d7db9d8896be7b65e8e5723dc650008532549c0420aedc65c217e9281113bc3ad3dc8ad593a0b64b613f47fdd3d36e46b32010b124dfa0270f274d788a052f2b3934f25292b57954fc09a51bcf7828e6c3db7ad9d3d5d8bd162b71c5a2564e62186e0c96e7e8d1308ed264fa0fbcccd90b2095488ba4939ee7b52b156536fd15856d2b4abbe814ba479d03e62888785fc3ee003b98a197c98f0b4c7af6c58c3f3830989d97a3fca1ecc9c739d225c39e3e3e7d97f58a7051f6f9c7bda689913eec1ff028657a6e7d81265a776a24d34e62834423614952778c4da2e96b03387b2d35979282b8f31fcca7770afd761b94d789dc27ef7bd76b74266ade0ec1efe302a93a67f3fb2256a2833bb464ec16308c9cd7ee5fb6d2c129b1d746e1e43bea8ad87e06522fef027da4bfd108113dcc86ad519365b7b31b9308252f8a6d688d2036347d240d9892a5620f002c1de2f3038ee984749a0501eed904d1ec2ee4a0a84695bcf9cf2145b2ae86a3697e0a06ac3ac804a7166c08bb56a46d36e6f9e0862352e8d8ac50a8c4cf900a637e28c6ce20afddbebc82cd6e3aa89a67d12c31d9d728181b084d39953fb6b939caf91eccc54d888a863bf417bb53713415ad370e273b7af774cd46d0d3ad914074a962a20ce28d160d12e8a6c1ab417978f6b736f50917386acb7fcc2b95575e0959d442600f18c0ca36288ac29677bb7a681989e90fce58cf6a1fc35e50a1def65eab20222666e15ae51e9766a895058edbad2567e36a9356f4be2ed8a3502eb0423dd0ac92d2b15239ef86e21b1f85eba58dc0743cdbcaf2a72dfc1bcdffc3929bfc5a6f34b1bb45cb77fe2b5085cd5265cade91177032ffffedf75106cb31fca426f5345ac66bc624e35d9bc6a7c725a8ddd337105ba56d61dd3803447840a53dbd1096d477c878d36fc002482a1f82a9620bb073328338018ed71346781280b7e00d54a10bedf3b5535516f5591f882d018cc95c35716bc6bad68cb5533df0eeb867937fc72f9b03b4fc0bf902c83d8a2edfdd579e752ab75a75aaa7c3d9f1c8baa53720a1b8d0345a7b08a69c2cea389214dafd96bccd6c22f251fc689d8f08a657f7710548c91161098b1bf73b70353acb205a8bd14fc87634f1ee02c9e8cd4e20798b54b3e5de810bd1b21e53573b6f73684f258ba461f686fdfd089591807d179c9c2c57926e1b4456f9ccb1a375aea584d728acedac6dfb29efc561e382f257a68f673715b1199fdea7f8bf6de73eff734aacf64ac8a2981d7cceb904f22f8b24ca76136878f4d55d22cee5a9225de345140875511b78c1df1264380285ace0e534f7cc9849e057dbf5329707316036a3019acb0f396801dbba6ec7198ed93c44de035bed7472017af9ec4a37e174ebba8c48637033c8749c165c80f0f4585f7fab064b07e825852e450fa4f6239291dd4f4ba9f609c98a21c1e05715f8f5576263881cf8b183de45d3207e4ab616dd9372687a161fe234b72c5c7bd4d2d747adad6098e4387f37d033678bc09004433980c3cf7f659abca1390017bd3b7dcf72810fefc1ccfcb91d65a8830dab0a56176ae4fbc1b22179a508d8c3105ea5c0cbfbb87391fc33d4a3cee5444d80b85a4b1112ce3bc3eff2432ec6a8f1abb7db9245fbff203a5f84a33cf2ba5e82b2a3b8008568ca867500935cf66171a58ccd1806df4e40c909a42264efc6addd544e086577e7bc7631416ea7272f3b2e269481797d60eff296f3c87381847fe762004ca07360bdbabc8545e34a9177e4f21bc03254c146e003daccb625df3a530008c2a23e4e5c86cf48dba657905ce630e742d93d3f5f595ee08f8ad168841775ceb397e4f3d3a3880093a901216d032dba4d5a92037dcb1bfe2dc9f48a0603a2b49f4c128b404ebe29efa41d69447b353b73eb4993f3c9d0a3533d55fa405f4048cd194dcd5be4bf5ea58603379ba40301d72ca3be0d22cbbc85ac3448f84b19c61b57cd084c083125b2a688ce119bb121e59fc5089cbccfb1a451822dd4611f201ab7d1905116b14d71bbc2843adbaa5f3fe45827d6d5dadc0a600ea9e7204b019440a3cceaaec42383f7c6c34990350acd7f9c8da8d6161fe9576695067a2da9ef2e6fc839828aa460973c8676af7296dac2c3ebe9a28019a814d8f3b3d5c8373fc8a7b5dcbfb1d2fd30ee3854e743f54dca
7ec6883cde9b3e0e3d6ed1cd1ce3f8ed944c78c75b3aaaee41eaf76d53ba9cc6e3c26855a5030a8fe33902e1fed83685ce1237464fa8048a5ae58e5e53225fa5971a7375d3dc6eff49139ce71d028abf853f9dcd44027533947be345b6a2bc5536367eefc30521d6045dfdb27196743f96b9cdff6772d8e228192bdf92c7737be0b130cb4011692a32495e15eefab2a96f346e376960eab4794f59523091b11aa7936d5dcfdeb32a4ed03f4531c1ba24d019d9f85f7fd4d3231f089cf1170d5cb445c92e7367e2693c145401fa701be03dc84188d8694eb0006830ce45bfefdb1d8ff289173dd153519b1939433a97603b8e625eb32366ae864a9940a2d03e1f06e87ed663d447dcf78c05b9254c5ab5a23b3cc97e3d42653d788cf8519e5d765e843f0fbb23be2c72c1d2697da06b1037a17d24dafaf645116b1df4fdbf1a9b5704f24b210c5e3a5985489e6725c48df3e3a132ceaf82c68fa609cdde6febc7fee498fcadd38d56ac189359047a4a914919da06f066be22f761611b6ccee116431484d9ed953ccd6c2e1cde8432138607310feee52fc5868376c4935152d5fc4d52fe4eeaccae40aeb9ae99ab0daafee803882cac7554c661ea84324708ee6de2ed4c95b4d52c192c2b6a22630c7eda55005dfa44361e7b275db00523e01fd0f8da85114a7cbac0dde4c94ffe6077b17b7ea6b3d698f9a3e79c66b398831eff8802544d3acd8365586d7ae4524720e1dbf19742f9e7cc0ea0312c3ab3f2cbf7334b0142d96794a8fa33e2aaa230cccd4e8816c9b42bcafaa2f96144f048632efc1153fd3e831fcf38d540d63c2d87b24123c4a41e1e3d4c62be6c9c83a4e043448c207fde0cadb025516316b1d74d800467bbf43748c1df31a375e713a1313e53b17f8f19fae6916bfa435ff3a344af287a5ac481f21f87b2fa8c6ff0d3d7aaa08ab6d3cb541f0e9a106de1a5dc126b3beb2042415ff032cc820f554c5752d12566e35b31ce4b59245fad05c8cb487527a0eca4089db7442247fb0cf9d6885f8b85cbae0bbd5040c485639f0fd6897f056701fe8235312410273709d1af8b2f54e8dc768299b3488426d1a87552b7ff09520aa3cd1f90b684510a6c0b5f93e53795101abaec510822f43316aadfe2f9cf84d3d9ac181795013b02386a821bea5d41112e07958f1c85bc32b48f97a89ead6e70071095319619d215e3b67a4c8eaacffa288865fb24c8686791d8de5a927e7145aebd1061db38b259c4b51f4d9c1433776717e5753491140cf3977eb178dfc0d64a8a004ff80c79ff411f0e3e9ba42ab58bc0d02c691cfb88e385c561db9e41cd75834e2aaeb493ee3c419194015fc4d7de15cea6ac4b5feb13cf96e89beb0f7dd3b1eff142ca856b09c4a9d18b89ac8aca39455b23724f268f3f0d5c9f909b74b0cdd133e3c79928c4e1d5d2c6d203a5ecbb2085dfedbec0d2078c9e6322dd1578ebe7602533085928a153b09adc55f7bf88e928ccd1fe836d787a29e8e5859f6450d9678865d3096706caa0dbd444961e5666e8f589ca7af098e2439768f3f4f7b7014966b8fc7fcb479e87309101abbac4a40a8a0384b6682ae75c18804202f4005464ab65f4931b84a4d59ded8fc42f1641b96c1f1b6cfc0581dc9c242b0d2b92e2e0183e02690d3bfaadbd8a23be04dda9cf88cd4d502b7e96c08fcf68ced3a9fe1c8f1789880c987608a657bec19b72210e500027baa0039f1198690bb91f26d7ade84fc146feb500c53c5d5a9e87de98815d4d0dd56ebd195909f5e4610ccc48dd1e1dfeaf276316a80d1a476f3014ecefde04357acea0f4310eb1f245e74cbb534261fd041d295a6dc3ca1638474737408474d00e21e3e9d8e84bb8fa004d403d5b14dcfb7d3400d9b57eb123ab21b12a2c0dd962094d8039b632d6431843879af91cb0d45ca014019870356a87817a49cd8d8a94e51dff9d9082abb568ca655a9eeb0df0c1b7cecc93bd905ccde0615c97af991b4deb779e9512b15970472de6ba972153d78e216c29dd355914c1120f75d460c6e894e4995b1c0a3ade9a1f3fe95ae33116a5bb157b4fc99157399522e4f78f1b60eae3088727ad6d7ebbe40c370269fc7fbf513b25dbd072d1049c1c3f0c5cb4de7d7679a50d23a518881e908dd73c5ee46cfbb4fd24bb6031ece8b1fa1996e234ecc4e2464ee7ccf28e3f9b9471f261cc6034f590d389d06bb5e3679ea25d00d3a411f648d337f9ea27387a0b09efe9d2dbc5656284c6832345a863af9a3b6c3a7a90907a40e361f17c5594e1b5369e53ef10570bb9e8c30e38c96788b7ed92ebe93e77347585ae567ef19f8cc5e6b34648de8a79e67a5b4e51f3222c8d6e6b694be1b7a2294d04bd9747e9c27ab90012089c4362e9b8dfbfd7302360851211ecb9c1b8c52dc23d10160a88b862442c0e871c153a5cc7b50f72a4a9437e114b6bfe01c3a5bd42bda1abae4088d99aaaa005b070e606ebcc73c075402b527f65f1f18bfd6f40e
90c05c3e4b45a416559f0d91b2403d8cb2882b257976ddb2767cd7b2d7b7465e39f0a3ff977d973a1a37aaaf56b3a0510734f138c9fabcc2c92756b71dd6e9c9ca8f74353dba76fe94bc5e4794e43a361bfc882a895c718ef4e96ec24cf6b46f5d35f7c22b681ae4952cf41229dbac1b02386ee5c33a91730053f48202f090e0fc01fb99c1bd975cbfacc464702414aadcce59dbf76145cd418972021b6c622d07aa88e8f5e470988dbaf6a159726635ce8266eb163e79bdcb25d92425286492cd1234772393021697eb1f0cb6a38fa9859be8a6fc81643073949fe14aca7d612308f64cd3384416ed3033aba9571361a10ae030845e021e4d89617a4459397605c13a1b73fe74ca646e4135db5caa4a9fdcd27c35a88bb085deee56266401e0399c291f230f2411c3e0d7890a8bb3e074ad8af52ee6f588f028e6da37c82d7629f3f232fc3a205b8ac41d6a697b93f0f7dad836f92d7f424c7c93c5e1e4b77588a47e713366cf367d3ab829121e426b727b0383e1cc018907e4f942368ec928ea2a1b689f0a59b890c65744db0f292de7b85f3213abe26869faede3c8fd5893497f42e07ca8ff436a6e45c95844b9cb603c040f23ee87265b346300a9fd819d54e648c03a3291e2a20a17df95e1c6f1c2b19ecaefae12dc44557f077ffdfcea8eec8a029ec9dfcf65d7228889e83222150827296f9c0e2bcd81ee4e26cbece4ac7a7e5eccab09921f6e1226b172f4c67103343c527b4f96e8300b5dc22e39cd1ba13c7b07bccc", 0x1000}], 0x2, &(0x7f0000005340)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}], 0x20, 0x4001}}, {{&(0x7f0000005380)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f0000005780)=[{&(0x7f0000005400)="5d592ba9474f4e728ba6341615bb26ffea36273ba0992d25331dc350b0d02a5d", 0x20}, {&(0x7f0000005440)="08c04f19ee3ba1eb50729810c9ec7e185d03290af8c7f019d24a7e1fa8ec8c49b3934c79e1e8073f9f2c6a7641ad40641c4aa3631b83702440181d8c1e5580cbad2fbf79d92950c7cea314b5d7cbb0a0afb78087d756e6c15a994111443e2a8d59ec8d7077a5fc94a59fccc1a72397c490e17a7288ae4239f97bf9368f1db42ece082e70b70cd567602a4535c5d8e8a104d092ff5e05debe64ab8d4fd407175a6283b58bd4214d888bdd19a6c3bf93c517777d8ea60e974699f77a1b187c769e37c15240cd0d92e1a5e9d833e73324cb1b8c170fee5a02c9da341ae09a8a4e01f0fa448f502c0c3afe7e4e3bff9c720593e481052bf9829191f3b865ab", 0xfd}, {&(0x7f0000005540)="a17df022acee196f3a0ab61e7ada472a8b42f2dbe1262db834f7ae9cefd0cad0b9d696621103de3dda4dada3f3319751ea0e1dca38c4084da884a524b7f790c9664b92d745cbc1cb352fe7d968a6f430e1935ca471a261f6074387adf333b969e2a7158fb62617d20b0354e130578be1d01c1995ff6076b58e431b5384ab093f30e7e992b430edeee329b2935db775e2af4c2b49af0bb931b2106e01139ff28d069e90629f4a3ab07c39a7152138923cc4ac1827655e3d0e78ae9be24eae725a7ad2ee86ecb2942ec38a86149fa6c1b30e4be6613fa7bd3432b5de84b63dbad0b860821f51585b5b95df257c422fba8ececb38", 0xf3}, {&(0x7f0000005640)="91a2585f2ecffddadbd0cbf7b6359eaab6f865ff7521084bbf408ab3effcf557f88a4fd780a8ee9d7ea8f110bb3dde5d2fa328137b284168d196c9bf187e1b3e5ae836fadecd004c5d41983a5d8f55dff91e6263aba40fed1ecb69ff78b80a638e117b065a44711fa4fcf131ad4adf138db269a0535099724c176c0cb20e57a63fea1d8923fdcebda03f741d96fb7e06a015e1f9b95d01c50f258ad34a041524542d0701b5d1e9de8322126ec6057200d288d4c6c645cdc43fa245872567b9860c944abd4aec534bc6eb29511d8fcd61f2e2ad45704205e573023cf44577352eec8eaabd", 0xe4}, {&(0x7f0000005740)="53d48fbc3dbb9d6e140f1d5e77eba35c9fdfbea14a60e1253016c006232ff984598a1d1183570d99956c1c7f7205f18e91a0", 0x32}], 0x5, &(0x7f0000005800)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}], 0x40, 0x84}}, {{0x0, 0x0, &(0x7f0000006b40)=[{&(0x7f0000005840)="bedc24dba49e008f9578d3c321f1aafd5116ae65c2fda5f7", 0x18}, {&(0x7f0000005880)="f795ba464eb04b3d849e41b6e4c6ba8ae88331d88590d61ffe3420b91bd3f9265dd2542e77c4c555f685ac296e3bd7082ae0e28631e430c02093662b578c0af81bb862630639372b577bbe55a3be34a6438e4254edef392294c505bf189f00623199212f873d54a9d3", 0x69}, 
{&(0x7f0000005900)="1d03969f0e6786f444440614743c78dad492038d4e7d6de69266b4aa66b47ddb7f06b51e77ce53389b48dd0a7211f43014d8ca872dea603abe32971024817a62b3509d9cc13772295f5792aa14a3fb61d33490e994b6c369a6a913a9507985f2d371a4b3d20e9c9e0f27cdf729e5959f7edf01ccd17c5df07a3370f976c98367f93888f88461fcdfa9772de3eaff44b3", 0x90}, {&(0x7f00000059c0)="66444bd856da6222775540e315e384f358c142f834fa7ff6b54e34c10a6fa940b91e1a5cade9b32eaca4693691b65813ecb9989090e85ef0fa5ab48152ef1e4e87b2be50b3537ade3bde326d9460a2b51096c85d05a73a3c97cae5f7b973b086e988e70dfd91a9455943ef3e618a02b2ca0f1cc8d5a1b6649d38f8db7927045f11ca37b90d05297ff4f2307499c70fec51822858fcd89a17257ea15c0946f7d5959bef8f5d720c7e58e13b676b878ed39e29c55dc32ff91d28eb80796d0aab9a3936f0eac6d85544d90f2702", 0xcc}, {&(0x7f0000005ac0)="e38094f454c376337e8b863126328d4ec1", 0x11}, {&(0x7f0000005b00)="548b0a55c94ff39b5893602d102d271c195def40708417b7fe21c2b1167c1d5b424d5d63ad40f474476ad6107bd7850a2ef611b5268a8094882c13afb419570d397a2dbb46c7724c56ecbd149be0ce199524e1c33eed22bc26e738cbfe86a07c83a94fc483cb9ef9b411e868f112a8a4684e056a2943fddcd764b9730caca6510eacb6f278c0360c9cdb7f2d3b6e6cbabf928d67c8448d63a959348429109c644b4099660b5b22e52149b4fe0d8af2adbdebf136e99f88585ccc26d108070ae58c5f3510c66fa03df700c77a9d1823de447c4cab2f131de6fb800a87f005374bf4e40084a2f0c476119770a04aa9a0bde42f0bdf69b9158801d658be9f31fe01dae6292bdee540030da638426c386277a8efd4e7603b60e7cc6d6c2bb43e188856d890865094069119b271604dd8b39085a1a4f00dc2e4f0d61a9f66fa0f8e4973d4a0255f760406875b9eb7628628f3ab48a1fbb270ac07ecd4ccbfe06d5c2b090f5752bcb4b7a6241f1fc1c482e79d5b2d217ca76dcae3ac3ad5b5ce355d81fa42b028e17e84e1f1bae9acccceef9e85092baca8c6f4f8170c85bfe65989c968a945bc136cadc3f460b14870e3a92df5339e8961660fa43711670386c04842601165d072c8d1f5edbc6fdbe29ed067ea349a71a8905498f66ab04d6326ff30dbd626910572035b488683bd5200210020959b6521dbe89fef399cd0b215e05acaea588ad35be98a21c9e64410215a56fdb38141505d879c0d539d0dd80728f2d41bfb523f4177c75253fb40c9dd03754b86d1aeaf3c1580ff235e0c9984c89e575df6e9c91de9e6b5912c83e78e03e20cea081fb0e5a82b3814a95f4b86f67c7fb59e6a4aa50c059b4d01dcec078a7689f850c8d7d36bb6c7e7d0363de2de2aeab990cebd9ce2124e834b518c54408c770bf8d6b262130d0e04699ec623db2818b3c472731a124ae4d42021a0b4721eed496c83aa3629d4aa2d2bb0cf11602cd644f72254005fc3b51f7a59152f64b190657b8f1370a4a95aa6307c8889f0df13a0271231df9df0747698ecbccfe1bfc3693deed692d5acb57d0aaccdef646c40bafca7c044c00ae8a338a95ba4c29d45bc20422a5cb44656479bee4c1a07e7b633c447b0f19a7e9dd5ce3b26fbe66d6f21c62a665d2772475ddbec460129f7594c0a749e73b3271770c4a9b53a41adcd623d5fa2dfa5ebbd0d50cbb5e590dec93615e604f0e9b5d171571697aefe9f6b539dfea5ab8cd8720878b61a0490e14e1a7fa15dde65856839471e0ef17263cb8faa9e90a18cb92ceeea45611a9eb3611730574db7689a68acbbcc25ee178671baaf77e32022ff1fb094008ffce07c2e459c283861dd4df3caeff033b7aebc59f7c19f901fe30cfe8bd4488f73b05ef4418d5d0e975196634352fc4a782876e93f4dd0d826d7b4fdfa367e45459949680825515b8356ca0d66c57b90761c5aa0df445d0dcfe8f81c50a7bf69a4e723aeab8c930bfc51a102c6237e8d1c82f11c50a46dd8058990e94d223262252cc14395b3cb9c69b970f8cb1a2dec6701498df4b22c4f8712a361b34176be95940c9826ec11bb8c6ac7b4a35161ed0f3c8e924a7765448b8cf6096f7e2f1ec0a1d3ba67a6dd3f1c73df2c6e4942931e7adb6720c82ede735e64a8c645e1a2d790c9c0c17874bb7988424a5871d1d465fcaa9d6a36e03cf5e2a0af120b07531b6a973c1024f91cef25be4d218d8008197369757e0d74fba417a435c4002cd211e19976eac67e3c7231328f49dc619dcfff276aec1740bf21a4609acde2192025a04129adb1c1eaeb71bb891d1bd633b8648693d58ccf88855801f83817958cf1a2f39e9701995d6d939d1896a9bceece557888293aa003b0a5ab473242ac077dfc96924363d64c76f6fe07823adf024
3e5f02a8184895e72166eed0278e71c51d44203899b72711091fb46995d9c1b8032dac8b968176d6fd2c00f50919d7bf4f7f2c157922856de0a29cbd7aaa42b042d10f481910d3a678fbb3c075a8f43bd16d328729466a2919a76d46d70fb1be4855d2cfe211b53616545509b2426b76544c6e1b2513bd125aab4120629a3b551bbd6ebd90ecaa5a9c898c7c5d7a88598fac183f41f6a52602980dd7ce20f1e4574ac2820885720c92b5e20798f3e7b3b82603f9a4464f727bc0d0c569a070737ac12fbd82594e10deb906c1d1dabd3133492cb2ffdc43166d34bcce7294a4c3aadfceb603791b8417a9128eddf382f9e2c49b916864837554609ef9a2f8c2430f3e5d5e0133bfd232af3f93d007f259b8c4ef850d3e00db53c23d5b37d085d38ab742148e7a9425908ae4bee58c74002a3e186554c7313be11d48732cf1ebafee5134366c46b2ee06ff477ee090cca57c671a6a74d2e76f4d78786563547f74506a3c8da9ee3eeb534fb2a15e42e90b0f01d83190fdca7be281ae9a2fc1136f4f3d711591895c65d1cb424903547645624763e91b74ee71889f50610086bc6007462e24c23fdddb1c5d147235e85e6ae0e356ea19443d0a80c76b7924b64a377e620cb767b8059dac86b20185eed2372cee82220d2418407cb38b2f99e26c5467da910d9a4f968480a080fcd14fb5b2dd424e6b1df7304ea730adb054e7c072ec69636d157456adcb5e1d7d1b34fb1c5711d9387a1fe276edfb3101514343789211f23841f5c6565b7345cec480afa002c4a0c23e14417d5f72144f5aa4795957a2c0ac0f54f23b86a5907c9dfd79cb7957857a63d11e124df7676b1ce0c50af48f1f22a30aca08d62c58afe3e5199cb870a2d644d0ff480b07b9587b2ffafc38bb8d6ec307c4c797e376a7b4078052ee7b52ea4816f73d087e88ccf35604c916718f7cf9ef2b676d3c5fd609f4c28dcd39dd476b3c68f7827993fb71e79696b0c3b338fbce8a436ed5c11dbb903c9e83ec7d31cb14ca6048eda714abc143fde49b9c231ea78eb4d9f92fe89a124bebf08019e2f43f75a91c9e884f2dc3240cfdafd446f08ff3c304894836b44c9b78a8f06317a354f6d26eac23871d9bb865dd76cf6b81c830b12ff4cd12ec0e8486b1bed4f214950c238b0841b85369445a7fd17ab43b599869167200e242ed9e6c1df344391092da46ac15af337550307909d6b586be23a340c18d8b47731f3a7dcfd4041c60a3e62dcb1377808079c8a4440a952140e8f69fd2efc69ee4ef20253a4110e2360433b5e4e066a3a9afb76eb412b1bf20faf4fe2c4ffd696dcffb812030dc045fe15e68efd591c32a896912f7f9c77735aa17bc37351af23e5af2a59899054436c63a6cd50d7ff0decf0f7bcbf4f20ebb178aac3b8b26ba792206b6454874b797fd1907542a6a306f568243222ea4dc12d21a81f63100be278c2e14f05489ed9b3d9a748a6ff27cfcfe7419eab5a7cacfb8f2936920bdb965ffc2d5546f98dc9faa18bd74bb2456f54852ec5142bc6f31ab199cc40c0512f8192c219cce4c88ab6b984f99b43265da03608ad5815059bd7740a594b992ca78607c978ae31f2fa690aa914c028edc8ab687cc8fb21c5612270a31a110d24c012019cb68f4ccc33592565611e73f21335e4887d80afd2037284fca6b756cf491136b20b6399d15904f1d42f583ab1a5a22caf0003f537f1f19ea3485387585b8441621939ad00c5834e212b3f09d961239573b512607d84bfb9dd2504bb524085f21edf72edf3a25a244832ab4481a007deb2d27c4c57ea6b3647643a400742521d44f8cff6610087bc99cfb8a2c72a5ce12eee8a38d75bc1570f95a5b8dbe1d298d07f667e762ef8a194b811035597aa9eda760f082a7741e6ee2b6a493be793124537c6b25b0aea754362b5c5b32d62c2619c161053d089f3e9cc8e1fae6a2658c198b389903bea459af41c830a05620d65a0e5a4e90f7e751da9609157adb1f3d497b64a420eb2e4a9549e2d935b5557a899cd3a9081cde0b7cd812d9720ed1ba568b1c440dc3016eebe01aa8d244d18c75076bc04b49439fdec070db2e3984908ad4145bb422f537b5480cb905ff70e86a3c28cc20f55ec4b4f28a8f7285325e918c62183ddc8fb172b2b6ff5ed3bfcdd600a8ee00352aa060d3282db5da0c0f1b5df3d91550051c445be5772afd9e44dc4944769bba103800d2168c5adda863bb77fdf7b44bf67255f6ca4c5c8d1ca558fcf876a27a6a3654d6b7cabe7b715022ffe5a82aa49d25b31d10d50314fb84567221374892b9394a673c1a82bf0441167e4b5e906aa303c90ccb6ee7d5268f66cf330368bcb147b2d00f71a6a019063ab9a726ee4b0094c685ba601ab33dfb8e9c37c3e5dc04bb1ea15f05c14ed1771757e95c4473b51da5210a78a3756f5c9fe7db65f18503a703a572d52142c1691758169b76ebe3d4dbd15f4d55d4b141203ce06a6deb3d26e8c62a58ca26093c415f0
8fc01c34ba4d6ac21abc2c0cdca269c89924f562c4a1ce81fab80623ee0841f9dbe7e4f426d95f48e3277d8950c0f4ab172e82f3a39a18cd16b2f6f03bfd8671950dec0ac22ebf16fbc9e8434e3c4e17fc643bfd9c8d2a1abfe44bbaf0a9c77dc229c54cefd51092ed1db2c669e77f7c5545ede091369671d84fa34e94b0155d8313fc46c5634d493aeb050d1e329c9fbc11ab5eb9c5e148a797c11622e75cea695f2f2c5fcc8d31558ec409dc0772b3c5eaafcb9f34b7b2978e95be78177f9c1f8629d204ae66b0c81dbfd64fb38885d1664e3b241e6872be11b190178d7c880df2495deb2f8dbe6233b52191b776196b7835e3dbe6ae4f6ca2865020acc84e111558c25c270cb9f500e1fa092d925080108dfceb88cf97593baa48b244542e0875550c5082a2790f1f061eea3e7ffefd907b1df212e16cfac7bfc692fd023a675a5dd66ff71ae35d75a90577dfc01333a562d9561257680e231c0e2c66026f9ada2c3615c8b89910ac20489850b3555dc57b7052fc07936d4bbe73b71ec7bd0daa453fa5d6f4eee48729879c58455dc73fb5ffc6d385b50ee7c68c49d5b17b1b00d013831932860e6f688b6cb1b6e4559105d291ad370adc3d45da0c0c05cd3570348127d005c402c8de4d04df399210a8b2bc67ab212758f40d03e8b6983d6fd7117f77b4ea1e603c6319868380adbf98bd5dd9182f84d8cc3545853c52e9874f293f304342545018d566090970458c6fd6a473473cfc160aedf86b7eab2815c74bdfd63e98762191919f11b0c20b8d21c5907d8bdad0cc234b1db060bfe8b56b4eb96baf28d6425712d4d895cc08e0450e3044fc5abf7000d63dca8ccf4fd34fd91d27c0d572253b30f9b1962e9b94903cf1d8e44ab35216d00e50da4a8f3d42b9a14ab245ebbaead50f34acc9dfeb909ee8d9bda30aa1db78658522cadb6ab34a772e86ea807ef72a14c897edaee057fe6ca0ad34f26b592a5934332e790d1adef4a7a23f25288f03afc18c885b122351c5b7bd42de14a51772fe193dbda60dee7820b4f45c135e022b368556544e1b36cb4459fd92b622538501e36eea0ceec51d2a664985139cd5124df8f018d4371bc873e7f5d9e3e5d74e7e89a58fccb642da52d9f7051ad6e13cacf4047d8f630270fd3eb05788d221a92d8e39d0fdd77eb0c00a493c038145dcd527792a61eb4f299f19ff9578c9a83381a1e4de1e39076a5851b650c051fadf6b5cdd4f5f614bc5a5f6572eadcf83e23d05c43711e99d8d747a5d59d25835bb4dcfd9471fb860abb42b1e3fb27ab1670c36e6313a49660ee0f4a100804be0ef444536fe80cc8ca08985eef8a49b05577bcb078888", 0x1000}, {&(0x7f0000006b00)="355847449e8ca1c2a9e8898c3e898a31f1ae35ad", 0x14}], 0x7, &(0x7f0000006cc0)=[@rights={{0x38, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r1, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r4}}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, r2, r5]}}], 0x78}}, {{&(0x7f0000006d40)=@abs={0x1, 0x0, 0x4e22}, 0x6e, &(0x7f0000008040)=[{&(0x7f0000006dc0)="0e36e4cf4e414bd7783e1ad6829fb9ace250e78d8f19cc8301642d8bd1036c69f08dc00bde392bbc4befee3ec3bd09a688d20928eb88decb0c2eef59dde5a180210c59fd595b58e74f9beb1a08d8bfa61e27614cb42213dbfb106f1de50bd7414a90202fc33c38d9b0a0d4598c6fce497565c805dfd80fa5784755b79df6f89c8fbc7f36e6f163478c7fa2945f3d30e46e5fc85d338039", 0x97}, 
{&(0x7f0000006e80)="19d213ef10cf7e9fa4a1a2006b739a7957fce4932402a0e6123f5da2884242e6f63b50445ded73228b5731bd10c17f3307c71ad03f9617ba0b37ba2439afbd5ca9cb4f03a71736bc2c2eadddba3c0ac005cfcb031452f828b190787e0269fa264449ea369be0564d523d5eb81072c00c20f42f686cf6fd49823084bafca0521fa02899e05cec27bdc0ea237491b36afe8caad2d5045bac8dbe200533d89bc5d1071effd4fe442f2187d555ac8096d663fd6ac1123881eabb516483a3d72d75a9052a26aa870944a6d60540679a98f8ebb4d0428997fb8cc44e35b630130e090bdb2a10701437d6ef86771d85e90c4d5933697d04f54c90177a04ad45b13ae26404e9f60a6694e11c208cfd34e69f9027dbe16dc9eda0892430765d49fd0a65a37b7707b040b967df04ecf5159d6800a4d8d749080c02be772eabe9ebbf5ecf286d132ab406875158418df9ea37152fc0e3b758a075d0d3511f2bafd4ee9e7107c5395c1b4fc4ab442885890b75747f21415ed5cf547a8e29adc7c1bfd7f8754ea27d8ae5fe66604f6a587fb15c5436331c35bbcf03b30afe8d993b06a735c7d3b23ed58f9da2508b768f6aabf446218bc32074ca8241f379a669786ef2435e70d2791113aa47b7ce8a9fa5ce7b506c907f88cadf566a592b33661e0ceb9cfc267785d4aff387884cd02d6ee2ed5efdf84a9fe819179c0ae0187100fc51f53a3cb2114f1f851758ea14d360062c00d6ce7f0dc1b5b184bc4269d4af1110e2af0045c1ec450b3830a954f4b39622adab39fc07e7d6fab61f30e5e3c505ba15af565b75cd42a59cbf3ead159c4c334bde71bccd515a9875e719b6750d323e9fd44892f7a66870f47077f3c22b1b478918c804aacc811eefabbe838628124eb85b755c92a232bdc79d1981df85d281562b7d16bd4dc97b15206c87247cb86119b0dffe6b3863e7396f372c3b76f542b2f5cda37d41e9e296ca021cee0d6c14ee6c65d5ae560faad98e46b7042289e0b24f7f6501e17d09a89039fe0e5aa26fce6f0db8196926cc7e271a1557daafbf559a89d0495bd90a9f15466cd560467db32f04174974e65e47bd91b56d1d8193285838a7742388a06fd3aa4e98773734a2a9dcf9a5b849bef52a9248a93c49a909cd64d1874b844097fc7edf0b25c6a42cb986640d78fb0dd1a6a5a80369ae11076fe2bfd97ac38f35b339435b5a4d8716b0e35bb2d8b7ef7a9207a6b4aaf4d2cec6af2a00c19308fa18a1ee510b2926ac2c93400c5b0d3a85ceafaa799bc57a4e5d334ca69baf347c3fcbc46bcb4530507e999141715451cd6e62fa800fb3cd90d99496560cae34ba8b8f62dc0f6594b4b9ba06009990ad8ddaf387a5fa83e1930faec4aa7eab8a38023c0d79c4f9815601f2fcefe7dfe36af7fb917dc0b6f6a5018c4274e86cd9d67cb5b3515d82e487294dc7a5d8f87b5b2a8cd5d781558d9e3ef3248b13f62851c064fb2edc0fc4f1992bb1b888cdcf583e5d49f467f388a0332965c3840eb318215ef967b627e6a62f44eaa5363e0cc8e3ccab4ded1325a22503264c268f9817b1bf16a8ffbba34b4422f30ef8ffdbe820c80da362874d5ee42ff01c67893babbdfbdc55c5bad682ce0b701745cccb38a46d95a1e82dd6d1e6299b28e255c459b6b4936f7e55db29a4b9a91ae8404cf4baede161b8fe900fc23b66e29c0aca38e625d708b24d52a1d96c8c3cbf7da2a4317e518ef857eead7dbe65d381b77f584c5e79afb7483c3c096c4167dd3b732751841b934b4650add418af4ab963f504d77f1f0e3622cfa78400f77bd7d0a5490ea9f12a47a034da54cccb098fba8f465d467be3449543f90414a17b9abdcfb69878691db7bdc21471fe2c923591765f923c6ca396d6c1f8adeb42ceea508ddb3237b5d317757d8130ff8e457f40dd220f50a20c946bc654a63c4c9a3384d655a9672c332a92e3fc83eb961697996eae926e6184da962af73aded4a865267e663d06a4189f6fc3a4e69f92bb9b8563c7b78378bb678c5a8f584a361323ef276bff5eb7fd1e8f5d4a0b649223de7776002dfd493caf3f1441f31eee3e998b845c408dd981fc851c1950046c4dcfc877bb451fd05cb71e031b40becac0e0bb3d4e08f6d71475329699705512dc1612b7b2f316efe6d87c97cf466559219f0a306366d1ff2fc103b3a83b1b57042a0b8a30d87185018fdc54ee955bcc64993d486939e5ec7567252953a8c8be1f2724f6d2f3a5f12d173914c3076dee147219fb6e5b23ba2d5a8c7cc384f05f2b3ac4270a91e6881256fbfca6cef7a5673efa9b08aee528236221090548cbc860915bd697baa4960943ee847c2735a31ee466f1ac0cdc62c87863365d219fa258b7193ea0c5366d5b408516f2e8d145c9f9a84e469463d066ce8c2e7f927f965da3bb06d82a1507b6b9c1dba26cfe738b23b95f96c93aa0abc4fc9ef970e33a56c8d53642571ce29f3cd207eb24c11bb2199b001b29d
bc0b88a8df0e945dce6ac16a415f4039c66566a505b3b521970fb1d630672a0d477a7e5698b5a0bad47f52b694217893ed9b27c14a48d15c7571681cdafbbaf29ff7d587e89d1acfa2e4d1c199694c8b395a4bda36232bfe92f9b5a9a4c48560e383d7367e6e8dbb6b8a778671dd2ed7a32f050293401c37b6c37fbe00d35b48311cfb38ad5517299003521cd41f94f29c86cb9a51b0e19e947631e61126e0f23c14ec70502f57b54c7d9b05ac683e97880f99e395be449646e8ca945f571d77a0a84d0bd0646f27716c40ad7ca5e97377065d195e1d5bcdf1acae27a11af3f574931499dbe5f8e03150a249167f2042abf7b41eff4d9d7448ca07c473c7448fc35820fff76ed8bb77db6387c63b89fa1962845e51d49347bf0099c3d0f1552bb9a84e6c89a66a0e53d4e729364336fe91026e7f9aa7e78f9ece080b0c683a462c0d537982df591811527def889f5db7f86b69c5c99c98f76bb0f095a689f8b9fc70ad6543dd288d2c646ad124d3f5e46f510b90b372c4d35d757b1a8cb84a4eca9f3f403d26f1c1a8097428510e6a63984a0ab07786bad3ffd9a726a367ade3b9f2f92e2f9c4d6d8096e88143305140151d987dbc80963696839f2abc821a7a4a6da861bfcdfd5a990a951d05086575f8d6080cd63d53d97de54bfa5df7843bf5bd287db9a29a1843fab25b911cb54d5608d20cd19e5943222fa71163e73ce67f237de784c1578799d44babbe701abb557750725f25fbfd080775e86c3f71b700865b57465b0c9cbd993c4a316392b5a725346c16a066513706007d2292d6e1095d52385f21a62b24fb4b6b7f21d9f8702fda83e8352b7ac332ec2df14b2d09c3811448d46dea57523d2b31f4e6c3bf1f6325a32908fecf8cde4aab484ed13d6eeda7a456542d9942b03c1f4c054c37d9e26089990e73773310165f241bf10a6b7dc1f34d92565dc8c1ab408feff2d73e05dd3f415b8ad06736409d45d8a134d3ec5406dc3a4c01b11e1a1107eb59cd995f604ba1cc05b42e08b895f060409682f40eb561fd66e3ecc0021009f2ea20c2c1478a2e9b313e8e85d0c372821ee1623279222c395395c0bd255379928ec4f609171395d967ae706468aaf2cbe7bb53928320e6ce4d6d5958a4e28e55a2e8cd5d9fa974a2190e395bc0dd6535590758bbbf00421f8097b1e94f26fc52e582d8cd6c4185ec95eef53ccfbf22b3a1ad50911fc1e2c0e0934626bab2598b35467c47ef662d67a5a957fbf0fc334a127d576dcfbf498307529473f770ee7f4d724fcd5d324c4dd3bc675acb3877865b78353d5bf358e21694c9a10faeee71c3f965d5439b8fe699b20eb96741da88a9ff57eb7b210c79d8a744d21765613bbd0095ef7d0789a11d99873c38a6c9422a246cd89fad72938dbdbda23119945ad3e8e09db4863c1cd156cd93f6a587b7d6a413cbef77166c08b1c577072403c3c223fefab025b2c26ffa296fe7e1e5941cc9329e7ac6e19323f0a905716da36841e0916baf695e3aa7cb52a55302ea1281a895e5cb494c1eba6cf8fba751fa1a72bf102ab890aeab62a2c7590aa30e294964b656b4e112dd743fb330e87e2f3d6f0724a1137ef7eebdf8aabc1f26f1cb3235ccc7af35b129f453429446325fc60fb9929bd9e65fbfe44e58a4e12a13c55056cf9205ac63e26908b7a2c63a5a518ecba81fb14428846cd9d7b5cef0fa0b8b878a7a1e17f96a4dfbff29eb5c389e37b9528d8e943a0b3965758f7d5fb442791bee4d83d35ba74a6049b34b84ac158a0406cd5228abe61ab0ddc3f1553595553f66a2810218826a9a1868d486957a9f14b9f0fb30c2a14c7ff806044cfca508a61053019c6ffed421083370efdd3f61b628a2da1c82f7475209619b61ddf40ed4d4a0110156c52e849b54c55cf5d4335469ed7ff9d48d5d48f2d9cf29d4190be4f1994ce4c9f241336fecac84459cfe3f7dcd7a5aba7a7be4942bdbdb4a16f8c2a9b7d20a3d455c17515faead7ca4b40f935c7123d7da283dbf0371ab67b8dd5875519c2bf5e0579637a7cbdd8d094839143be38b98f9d16e458761e8fe0bfedf6cc7d1a4151927a3009278287c2f41f7df79e04b7faf13a5e7f0262ea64b1b9f27123cd827416dcdd97d006bdef1c4b8e94b8fc0112f63a1cb04fbb30af79dbf3162c0bb1ac28ae461dd72dfd7099ebb14aa3e10e8a24479438fbabf0f3f79f6f0251207aaab5891421f7b7578b9b5ae859d5a8b3d8d986b5d30da4a320763ce9eb8404ca000a4f4f43a7b3fbca1dcef7299265d1e281bec2881fa5d14dc78571059549665ed4f12cffa0dc2eda64d0adfb1cf0fb729ec6a9347ce8218626620cde8945bedee6b24fa05998f438c079a3e49113044d500aebca46288b00e4d59f56ea5cd328247212956556bc5ea77fd733de23ac768931eef324ada06c67dfe8fd92cf6690d39949f2df581265d0323807036d992c311c26f2fb969f3be650ecf8eafa16573f2563a2e2285f4a604ae2b0
0f89c980b8fb662baa724213f48945afee7e3b27a382b54814f460489c335cef2b6e94f87ac529a20de6cdb5291293522b2378b369e43a33edbf34d46f8cc748ec94c12d400de20cf7f2fc7e1d5a3ec1960a63287ede22ea8b5866fbc0b6a9bf0e0fc32cac172c8e59fb0d501e13391f39b4eef88c234550d088fd7a3f132581b9a2737b25d4728d4370a77f64c5d5de14e2efe9b5af4ae1437abc17a288ec3a7f5274325307486956767734260dc3a5f4a77f563699bf38201685cf352b73765687a4a8fdb3a0ae8703974c1cf7986c012ac3d089c030b9bb69066f4f5fbcbfa23e760e4f1cdb07af94fa82f2fbcdb527a741eeeea63220dcd360ea7ce086b30a2778b9da1a159abd99a4ec1fe7a0fc72cfe8cff91c75e3e98243d69686cc03e8a80ed93d87da2b67a3d5203592ffda096cc02672abcf502595a9c1d5a6093257e31d69edc8633d96d824e58e61fdea156e08d70a1da4b212b4e218caaceff6be971d59629a8e71a5bc08dfd7b4cdd5cbc0bcecdaf23503bc576ba73ccf5edd01483e133f215b41fbc17bf419ae1676a2344c280439f56a449bfb43d94c1eaa87c7bac96f6b835886e69a32548b74703f4a527f82a34af01c524195021d37550bc68a0014f822dfddf1634d55a73109bfbce6799a29b3408bb6ddfc1dcaf2b45cfd9d629732f5ada1ddf02cd15c3d4505611c5dc09ebc35a0a3ccbafb96df5fc82bf591f9da70784968d445c01b367010dd509d2cecb07bf179f74b6cf61a7e201e457200e80f", 0x1000}, {&(0x7f0000007e80)="eeaab57f845ed24e38e1738779b9847a09a5526d9a0348a299f142999dcae6c4d19012c19ff42c6d3a5d8f516b4665f522843071110d74dfeec1d371494f2554c7d7a1105bde75f9bfc1db17e3a2660604a41334a3f8445fb836b2039542ea890d95b9d022890079fba8f77a5549002f95ea80e0f7020b64618c35e646ed5c401a189e180db5b5eaef", 0x89}, {&(0x7f0000007f40)}, {&(0x7f0000007f80)="ece25ae9fd4a82800376ff97ae32a4eb2aa35fa93c3d7009f9be16235a48f368da6ba2607a28eaac33370b4d0db6f6b0323641c1f2a806974ce65311e21438275ef492b4bd96bd977a937064080713ab17d8f2c8be3bd40647822b2bb812908ece370a7134918cf8b4864d51cc9b387e89c5144304631ca9e3efce3231a1cb0496f83e0a723e269c495d87fd4f8f294e7efcfca12023c2aa8991879775b97a9e47af4d709875a91ba5271578a5ec28e00a", 0xb1}], 0x5, 0x0, 0x0, 0x4}}, {{0x0, 0x0, &(0x7f0000008140)=[{&(0x7f00000080c0)="32d2682b5e5e6d13438683d3fd48b6424a463b273a37120fac6504852be18154bad8934be23bc302796c1ee3e8705bb7eb212415234c7ecb0c71143c6caada9958a23f59e5c58d0ecc4cd34452d43602facab9cc", 0x54}], 0x1}}, {{0x0, 0x0, &(0x7f0000008380)=[{&(0x7f0000008180)="ccb2eefb57d1df2e07193eb8480dfe377e9b44b4e4ce855176f1cb1ef85c3c454377fb92ac760f8ee09d30cd521555be943b9a1fc190491d292ed608eaf91614aff82466ecaddb10ff14469a904796244e98ef82f21b8cb1a23e5149de2e2177040d26ef5c85e3de35b9bf1a4746870127de43a8f4c3ca9a48b74488b9a61d26ba842fcd1b39f892807febee9f1b8a6108ed02e2728d6f68a70885876c4253022820dda1d7564e39864f8da322ceafd0eee3e4bcc113e1dde2cce692be644685c270c6c54fe179a8d115574ffef6c7c47f4c3b7ea9235da4dbb72fe7a77b7239dca21de044a946955e8b40ec60d60fddf32a", 0xf2}, {&(0x7f0000008280)="16faf5206b83c50c942069b7d96801cf5edfeeb2b01774fb72a254870f9c7709ec8bb051901ab8b4f105798dc79b744ace4e654879fc2f808592228b30d43cacc514279c95531bbd8bbce713a3ec87021c7a4668abaed97fe991c54654e44f094b56174d701e4059a76a", 0x6a}, {&(0x7f0000008300)="00c9ed957c06da8dea8581ea7b2e2efbbd59259d47d5873b500b009a5769166006a19d86788731054fbe1b2a0b262ece2f47a50e01b7ce55853270cb474373997c", 0x41}], 0x3, &(0x7f00000084c0)=[@rights={{0x24, 0x1, 0x1, [r6, r1, r1, r7, r8]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, r9}}}, @rights={{0x14, 0x1, 0x1, [r10]}}, @rights={{0x18, 0x1, 0x1, [r11, r0]}}, @rights={{0x14, 0x1, 0x1, [r12]}}], 0x90, 0x4000000}}], 0x6, 0x0) (async) accept$alg(r2, 0x0, 0x0) 01:55:32 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, 
&(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xe201, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1982.836310][T28463] bond922: entered promiscuous mode [ 1982.842733][T28463] 8021q: adding VLAN 0 to HW filter on device bond922 [ 1982.949817][T28465] bond922: (slave bridge889): making interface the new active one [ 1982.957974][T28465] bridge889: entered promiscuous mode [ 1982.967410][T28465] bond922: (slave bridge889): Enslaving as an active interface with an up link 01:55:32 executing program 5: bpf$MAP_GET_NEXT_KEY(0x4, &(0x7f0000004000)={0x1, &(0x7f0000003e80)="db3655c7decac14b9ef20513f0f48b1838d81c53b6c2e9c4399bf698dc473ed4932d6613881a4e8562aa809ff93ee83d0edf8cfe2fbad1505874395d481e046eb6a72d", &(0x7f0000003f00)=""/220}, 0x20) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(0xffffffffffffffff, &(0x7f0000004040)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {0x20, 'memory.events\x00'}, {}, {0x20, '!\x9e-\x1c'}], 0xa, "b38b8319a2197289e69c0e6810b151343a89de65f352c6ae8caafe008b6477dd4f981de745e44805a5bc03e1d82fc9d38f9e1ecf21fe08a14d4756e06b58c574324d02d7ab360c20f02d252278a6bed5899d3dcc856273a6ba5f32c0ff858ccb6a0fb9d46cd308d701f646973ea1a6ddd3d06dc6107cb27c81b48e6b2dc80d61048173411c970ed04e51ae3c0eed596b1571a03188776d50ef739471d03e64ddf4d9245a23d787dd3c095e80abf5e29189c750943b37f64fbe922771f910c4939bcebb10b83afa0a9a22fec1296570cffe60"}, 0x101) (async) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="00000000aa7809d8560000"], 0xb) (async) recvmmsg(r0, &(0x7f0000003c80)=[{{&(0x7f0000000000)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @initdev}}}, 0x80, &(0x7f00000002c0)=[{&(0x7f0000000100)=""/145, 0x91}, {&(0x7f00000001c0)=""/217, 0xd9}], 0x2}, 0x1}, {{&(0x7f0000000300)=@pppol2tpv3={0x18, 0x1, {0x0, 0xffffffffffffffff, {0x2, 0x0, @local}}}, 0x80, &(0x7f0000000680)=[{&(0x7f0000004180)=""/171, 0xab}, {&(0x7f0000000440)=""/243, 0xf3}, {&(0x7f0000000540)=""/4, 0x4}, {&(0x7f0000000580)=""/218, 0xda}], 0x4, &(0x7f00000006c0)=""/148, 0x94}, 0x4}, {{&(0x7f0000000780)=@sco, 0x80, &(0x7f0000000b40)=[{&(0x7f0000000800)=""/129, 0x81}, {&(0x7f00000008c0)=""/237, 0xed}, {&(0x7f00000009c0)=""/242, 0xf2}, {&(0x7f0000000ac0)=""/38, 0x26}, {&(0x7f0000003b80)=""/60, 0x3c}], 0x5, &(0x7f0000000bc0)=""/242, 0xf2}, 0x400}, {{0x0, 0x0, &(0x7f0000001f80)=[{&(0x7f0000000cc0)=""/66, 0x42}, {&(0x7f0000000d40)=""/249, 0xf9}, {&(0x7f0000000e40)=""/23, 0x17}, {&(0x7f0000000e80)=""/4096, 0x1000}, {&(0x7f0000001e80)=""/243, 0xf3}], 0x5, &(0x7f0000002000)=""/174, 0xae}, 0x10000}, {{&(0x7f00000020c0)=@rc, 0x80, &(0x7f00000036c0)=[{&(0x7f0000002140)=""/4096, 0x1000}, {&(0x7f0000003140)=""/132, 0x84}, {&(0x7f0000003200)=""/55, 0x37}, {&(0x7f0000003240)=""/163, 0xa3}, {&(0x7f0000003300)=""/222, 0xde}, {&(0x7f0000003400)=""/167, 0xa7}, {&(0x7f00000034c0)=""/177, 0xb1}, {&(0x7f0000003580)=""/85, 0x55}, 
{&(0x7f0000003600)=""/178, 0xb2}], 0x9}, 0x800}, {{&(0x7f0000003780)=@nfc_llcp, 0x80, &(0x7f00000038c0)=[{&(0x7f0000003800)=""/17, 0x11}, {&(0x7f0000003840)=""/102, 0x66}], 0x2}, 0x80}, {{&(0x7f0000003900)=@ll={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @dev}, 0x80, &(0x7f0000003b40)=[{&(0x7f0000003980)=""/243, 0xf3}, {&(0x7f0000003a80)=""/30, 0x1e}, {&(0x7f0000003ac0)=""/71, 0x47}], 0x3, &(0x7f0000008700)=""/233, 0xe9}, 0x8001}], 0x7, 0x40002022, &(0x7f0000003e40)={0x0, 0x989680}) (async) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r1, 0x0) (async) r2 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r2, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r2, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) r3 = getgid() sendmsg$unix(0xffffffffffffffff, &(0x7f0000000580)={&(0x7f0000000100)=@abs={0x1, 0x0, 0x4e24}, 0x6e, &(0x7f0000000400)=[{&(0x7f0000000180)="94942c3d1e007dfb8404de29a8697799b1f5d6823a70813d4cc3415c6f862e8ceaac7242aef16f9f7c571f15aaacea204d20b49c43182fe1dd3de88c4a06101fc1f8d6139579492cca024fe7db0bd605ad17f17bfaab7d62fb0b847e05f9c41fbfaf79a513efae1ba322990f1327d42eabce0b83ee4fb2b875a3c4f9a1b2", 0x7e}, {&(0x7f0000000200)="993ccb04b5af9377cad757d9dbbe8345526644635ab0ecc50c5c9b41303e1e1f5b1f6161ff3f0a61f3f51dcf5eab537b55b5db80ddea43032815b7908ef405941077ae8e58627fe7265438edb56ef1b6918735c74b3b8fb318d24c30d06cd07d15f385dfd52cd11a49d23837a38ef8284140bcc827accc91e3fb964378ab5da48352949a0f4b27797b96b083028f2f6bdb579e6c1ea1809c644b8e841bb7bc0eb312d29e9fea73a71744649b830f244576a3b1b8f50150c6379a7ada43987439be4e1258efbf5d325ee5f0ad6c9d909bd73a187d299cd9d782beb7a8b2524cf2b61d2dba7e4acf6764b73c9a034907cdd5b7f547", 0xf4}, {&(0x7f0000000300)="130127c0749a951379b88d7ac86bd00a069d3e5793db16848cac09380ca3c6045e088493f74bbdd96015c04cc03eae1802359cf0a739df19bbbc910c3256b1724713e6e5c4be6c2fd26afc35a60e33dc091785fd017c569eea7264d1416c4ee26bc35c2a3ee4c8f285c9da4f7d78ed6613140dfff54f048b51827b8380edffcbbc154571185532f83a58dcf55a3657ebb73d8a261228568bf32c1e5ed7a414a2f8b30a24d5b952ae26d33311c4d23fa6db921a7464444692273f8476e09803bb860b51baeabc34a7828152bba3533e16df0300294a425b07d1a38b122b7ca71b5dfb620963", 0xe5}, {&(0x7f0000000080)="24c7145919fb2421ddb6ab6620204c0441c838579e3bbb9693a8c127c2c88f33b33f1766e6b65d233d3216d16c5ebbe3342d", 0x32}], 0x4, &(0x7f00000004c0)=[@rights={{0x18, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff]}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c}}, @rights={{0x30, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r3}}}], 0xa8, 0x40}, 0x20008840) getsockopt$sock_cred(r1, 0x1, 0x11, &(0x7f0000006c40)={0x0, 0x0, 0x0}, &(0x7f0000006c80)=0xc) (async) r5 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r5, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) (async, rerun: 64) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r5, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) (rerun: 64) r6 = epoll_create1(0x0) (async) r7 = socket$can_raw(0x1d, 0x3, 0x1) r8 = socket$inet6_icmp_raw(0xa, 0x3, 0x3a) (async) getsockopt$sock_cred(r1, 0x1, 0x11, &(0x7f00000083c0)={0x0, 0x0}, &(0x7f0000008400)=0xc) (async) r10 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r10, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) 
recvfrom$inet6(r10, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async, rerun: 32) r11 = socket$nl_route(0x10, 0x3, 0x0) (rerun: 32) sendmsg$nl_route_sched(r11, &(0x7f0000000240)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f0000000040)=ANY=[], 0x24}, 0x1, 0x0, 0x0, 0x80}, 0x0) (async, rerun: 64) ioctl$sock_SIOCSIFVLAN_ADD_VLAN_CMD(r11, 0x8983, &(0x7f00000000c0)={0x0, 'xfrm0\x00'}) (async, rerun: 64) r12 = accept4$inet(r0, &(0x7f0000008440), &(0x7f0000008480)=0x10, 0x100000) sendmmsg$unix(0xffffffffffffffff, &(0x7f0000008580)=[{{&(0x7f0000004240)=@file={0x1, './file0\x00'}, 0x6e, &(0x7f0000005300)=[{&(0x7f00000042c0)="0bc0", 0x2}, {&(0x7f0000004300)="adf4e18f1d9b8ab32a273f0aaa8b3e31b9681a893b97a134a07ef6f9b91dba0bb7ccec423828d8d1cfffcee97f775de89a9fa4387637ff8fd0d6f7f32d68eecb3c982de2fd17320d29ceda7988a293074f7021ec1d3e88285d314f9721d44af57b474ae56abc732c9812e478d357af5eef0b08cf1fde50185e220dffe3857ea3a46f0f78ccb4d1f7d2c7956f9ec30c415694119ff86f4db5692a5074ddb6daef178d7ce5e01f611ba54ab000a007464766fa4e3845ff3ed9f8a279cef6ae088c5da94183e93cd4efe8d4e21cddf5b85fe76bd3657d7db9d8896be7b65e8e5723dc650008532549c0420aedc65c217e9281113bc3ad3dc8ad593a0b64b613f47fdd3d36e46b32010b124dfa0270f274d788a052f2b3934f25292b57954fc09a51bcf7828e6c3db7ad9d3d5d8bd162b71c5a2564e62186e0c96e7e8d1308ed264fa0fbcccd90b2095488ba4939ee7b52b156536fd15856d2b4abbe814ba479d03e62888785fc3ee003b98a197c98f0b4c7af6c58c3f3830989d97a3fca1ecc9c739d225c39e3e3e7d97f58a7051f6f9c7bda689913eec1ff028657a6e7d81265a776a24d34e62834423614952778c4da2e96b03387b2d35979282b8f31fcca7770afd761b94d789dc27ef7bd76b74266ade0ec1efe302a93a67f3fb2256a2833bb464ec16308c9cd7ee5fb6d2c129b1d746e1e43bea8ad87e06522fef027da4bfd108113dcc86ad519365b7b31b9308252f8a6d688d2036347d240d9892a5620f002c1de2f3038ee984749a0501eed904d1ec2ee4a0a84695bcf9cf2145b2ae86a3697e0a06ac3ac804a7166c08bb56a46d36e6f9e0862352e8d8ac50a8c4cf900a637e28c6ce20afddbebc82cd6e3aa89a67d12c31d9d728181b084d39953fb6b939caf91eccc54d888a863bf417bb53713415ad370e273b7af774cd46d0d3ad914074a962a20ce28d160d12e8a6c1ab417978f6b736f50917386acb7fcc2b95575e0959d442600f18c0ca36288ac29677bb7a681989e90fce58cf6a1fc35e50a1def65eab20222666e15ae51e9766a895058edbad2567e36a9356f4be2ed8a3502eb0423dd0ac92d2b15239ef86e21b1f85eba58dc0743cdbcaf2a72dfc1bcdffc3929bfc5a6f34b1bb45cb77fe2b5085cd5265cade91177032ffffedf75106cb31fca426f5345ac66bc624e35d9bc6a7c725a8ddd337105ba56d61dd3803447840a53dbd1096d477c878d36fc002482a1f82a9620bb073328338018ed71346781280b7e00d54a10bedf3b5535516f5591f882d018cc95c35716bc6bad68cb5533df0eeb867937fc72f9b03b4fc0bf902c83d8a2edfdd579e752ab75a75aaa7c3d9f1c8baa53720a1b8d0345a7b08a69c2cea389214dafd96bccd6c22f251fc689d8f08a657f7710548c91161098b1bf73b70353acb205a8bd14fc87634f1ee02c9e8cd4e20798b54b3e5de810bd1b21e53573b6f73684f258ba461f686fdfd089591807d179c9c2c57926e1b4456f9ccb1a375aea584d728acedac6dfb29efc561e382f257a68f673715b1199fdea7f8bf6de73eff734aacf64ac8a2981d7cceb904f22f8b24ca76136878f4d55d22cee5a9225de345140875511b78c1df1264380285ace0e534f7cc9849e057dbf5329707316036a3019acb0f396801dbba6ec7198ed93c44de035bed7472017af9ec4a37e174ebba8c48637033c8749c165c80f0f4585f7fab064b07e825852e450fa4f6239291dd4f4ba9f609c98a21c1e05715f8f5576263881cf8b183de45d3207e4ab616dd9372687a161fe234b72c5c7bd4d2d747adad6098e4387f37d033678bc09004433980c3cf7f659abca1390017bd3b7dcf72810fefc1ccfcb91d65a8830dab0a56176ae4fbc1b22179a508d8c3105ea5c0cbfbb87391fc33d4a3cee5444d80b85a4b1112ce3bc3eff2432ec6a8f1abb7db9245fbff203a5f84a33cf2ba5e8
2b2a3b8008568ca867500935cf66171a58ccd1806df4e40c909a42264efc6addd544e086577e7bc7631416ea7272f3b2e269481797d60eff296f3c87381847fe762004ca07360bdbabc8545e34a9177e4f21bc03254c146e003daccb625df3a530008c2a23e4e5c86cf48dba657905ce630e742d93d3f5f595ee08f8ad168841775ceb397e4f3d3a3880093a901216d032dba4d5a92037dcb1bfe2dc9f48a0603a2b49f4c128b404ebe29efa41d69447b353b73eb4993f3c9d0a3533d55fa405f4048cd194dcd5be4bf5ea58603379ba40301d72ca3be0d22cbbc85ac3448f84b19c61b57cd084c083125b2a688ce119bb121e59fc5089cbccfb1a451822dd4611f201ab7d1905116b14d71bbc2843adbaa5f3fe45827d6d5dadc0a600ea9e7204b019440a3cceaaec42383f7c6c34990350acd7f9c8da8d6161fe9576695067a2da9ef2e6fc839828aa460973c8676af7296dac2c3ebe9a28019a814d8f3b3d5c8373fc8a7b5dcbfb1d2fd30ee3854e743f54dca7ec6883cde9b3e0e3d6ed1cd1ce3f8ed944c78c75b3aaaee41eaf76d53ba9cc6e3c26855a5030a8fe33902e1fed83685ce1237464fa8048a5ae58e5e53225fa5971a7375d3dc6eff49139ce71d028abf853f9dcd44027533947be345b6a2bc5536367eefc30521d6045dfdb27196743f96b9cdff6772d8e228192bdf92c7737be0b130cb4011692a32495e15eefab2a96f346e376960eab4794f59523091b11aa7936d5dcfdeb32a4ed03f4531c1ba24d019d9f85f7fd4d3231f089cf1170d5cb445c92e7367e2693c145401fa701be03dc84188d8694eb0006830ce45bfefdb1d8ff289173dd153519b1939433a97603b8e625eb32366ae864a9940a2d03e1f06e87ed663d447dcf78c05b9254c5ab5a23b3cc97e3d42653d788cf8519e5d765e843f0fbb23be2c72c1d2697da06b1037a17d24dafaf645116b1df4fdbf1a9b5704f24b210c5e3a5985489e6725c48df3e3a132ceaf82c68fa609cdde6febc7fee498fcadd38d56ac189359047a4a914919da06f066be22f761611b6ccee116431484d9ed953ccd6c2e1cde8432138607310feee52fc5868376c4935152d5fc4d52fe4eeaccae40aeb9ae99ab0daafee803882cac7554c661ea84324708ee6de2ed4c95b4d52c192c2b6a22630c7eda55005dfa44361e7b275db00523e01fd0f8da85114a7cbac0dde4c94ffe6077b17b7ea6b3d698f9a3e79c66b398831eff8802544d3acd8365586d7ae4524720e1dbf19742f9e7cc0ea0312c3ab3f2cbf7334b0142d96794a8fa33e2aaa230cccd4e8816c9b42bcafaa2f96144f048632efc1153fd3e831fcf38d540d63c2d87b24123c4a41e1e3d4c62be6c9c83a4e043448c207fde0cadb025516316b1d74d800467bbf43748c1df31a375e713a1313e53b17f8f19fae6916bfa435ff3a344af287a5ac481f21f87b2fa8c6ff0d3d7aaa08ab6d3cb541f0e9a106de1a5dc126b3beb2042415ff032cc820f554c5752d12566e35b31ce4b59245fad05c8cb487527a0eca4089db7442247fb0cf9d6885f8b85cbae0bbd5040c485639f0fd6897f056701fe8235312410273709d1af8b2f54e8dc768299b3488426d1a87552b7ff09520aa3cd1f90b684510a6c0b5f93e53795101abaec510822f43316aadfe2f9cf84d3d9ac181795013b02386a821bea5d41112e07958f1c85bc32b48f97a89ead6e70071095319619d215e3b67a4c8eaacffa288865fb24c8686791d8de5a927e7145aebd1061db38b259c4b51f4d9c1433776717e5753491140cf3977eb178dfc0d64a8a004ff80c79ff411f0e3e9ba42ab58bc0d02c691cfb88e385c561db9e41cd75834e2aaeb493ee3c419194015fc4d7de15cea6ac4b5feb13cf96e89beb0f7dd3b1eff142ca856b09c4a9d18b89ac8aca39455b23724f268f3f0d5c9f909b74b0cdd133e3c79928c4e1d5d2c6d203a5ecbb2085dfedbec0d2078c9e6322dd1578ebe7602533085928a153b09adc55f7bf88e928ccd1fe836d787a29e8e5859f6450d9678865d3096706caa0dbd444961e5666e8f589ca7af098e2439768f3f4f7b7014966b8fc7fcb479e87309101abbac4a40a8a0384b6682ae75c18804202f4005464ab65f4931b84a4d59ded8fc42f1641b96c1f1b6cfc0581dc9c242b0d2b92e2e0183e02690d3bfaadbd8a23be04dda9cf88cd4d502b7e96c08fcf68ced3a9fe1c8f1789880c987608a657bec19b72210e500027baa0039f1198690bb91f26d7ade84fc146feb500c53c5d5a9e87de98815d4d0dd56ebd195909f5e4610ccc48dd1e1dfeaf276316a80d1a476f3014ecefde04357acea0f4310eb1f245e74cbb534261fd041d295a6dc3ca1638474737408474d00e21e3e9d8e84bb8fa004d403d5b14dcfb7d3400d9b57eb123ab21b12a2c0dd962094d8039b632d6431843879af91cb0d45ca014019870356a87817a49cd8d8a94e51dff9d9082abb568ca655a9eeb0df0c1b7
cecc93bd905ccde0615c97af991b4deb779e9512b15970472de6ba972153d78e216c29dd355914c1120f75d460c6e894e4995b1c0a3ade9a1f3fe95ae33116a5bb157b4fc99157399522e4f78f1b60eae3088727ad6d7ebbe40c370269fc7fbf513b25dbd072d1049c1c3f0c5cb4de7d7679a50d23a518881e908dd73c5ee46cfbb4fd24bb6031ece8b1fa1996e234ecc4e2464ee7ccf28e3f9b9471f261cc6034f590d389d06bb5e3679ea25d00d3a411f648d337f9ea27387a0b09efe9d2dbc5656284c6832345a863af9a3b6c3a7a90907a40e361f17c5594e1b5369e53ef10570bb9e8c30e38c96788b7ed92ebe93e77347585ae567ef19f8cc5e6b34648de8a79e67a5b4e51f3222c8d6e6b694be1b7a2294d04bd9747e9c27ab90012089c4362e9b8dfbfd7302360851211ecb9c1b8c52dc23d10160a88b862442c0e871c153a5cc7b50f72a4a9437e114b6bfe01c3a5bd42bda1abae4088d99aaaa005b070e606ebcc73c075402b527f65f1f18bfd6f40e90c05c3e4b45a416559f0d91b2403d8cb2882b257976ddb2767cd7b2d7b7465e39f0a3ff977d973a1a37aaaf56b3a0510734f138c9fabcc2c92756b71dd6e9c9ca8f74353dba76fe94bc5e4794e43a361bfc882a895c718ef4e96ec24cf6b46f5d35f7c22b681ae4952cf41229dbac1b02386ee5c33a91730053f48202f090e0fc01fb99c1bd975cbfacc464702414aadcce59dbf76145cd418972021b6c622d07aa88e8f5e470988dbaf6a159726635ce8266eb163e79bdcb25d92425286492cd1234772393021697eb1f0cb6a38fa9859be8a6fc81643073949fe14aca7d612308f64cd3384416ed3033aba9571361a10ae030845e021e4d89617a4459397605c13a1b73fe74ca646e4135db5caa4a9fdcd27c35a88bb085deee56266401e0399c291f230f2411c3e0d7890a8bb3e074ad8af52ee6f588f028e6da37c82d7629f3f232fc3a205b8ac41d6a697b93f0f7dad836f92d7f424c7c93c5e1e4b77588a47e713366cf367d3ab829121e426b727b0383e1cc018907e4f942368ec928ea2a1b689f0a59b890c65744db0f292de7b85f3213abe26869faede3c8fd5893497f42e07ca8ff436a6e45c95844b9cb603c040f23ee87265b346300a9fd819d54e648c03a3291e2a20a17df95e1c6f1c2b19ecaefae12dc44557f077ffdfcea8eec8a029ec9dfcf65d7228889e83222150827296f9c0e2bcd81ee4e26cbece4ac7a7e5eccab09921f6e1226b172f4c67103343c527b4f96e8300b5dc22e39cd1ba13c7b07bccc", 0x1000}], 0x2, &(0x7f0000005340)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee01}}}], 0x20, 0x4001}}, {{&(0x7f0000005380)=@file={0x0, './file0\x00'}, 0x6e, &(0x7f0000005780)=[{&(0x7f0000005400)="5d592ba9474f4e728ba6341615bb26ffea36273ba0992d25331dc350b0d02a5d", 0x20}, {&(0x7f0000005440)="08c04f19ee3ba1eb50729810c9ec7e185d03290af8c7f019d24a7e1fa8ec8c49b3934c79e1e8073f9f2c6a7641ad40641c4aa3631b83702440181d8c1e5580cbad2fbf79d92950c7cea314b5d7cbb0a0afb78087d756e6c15a994111443e2a8d59ec8d7077a5fc94a59fccc1a72397c490e17a7288ae4239f97bf9368f1db42ece082e70b70cd567602a4535c5d8e8a104d092ff5e05debe64ab8d4fd407175a6283b58bd4214d888bdd19a6c3bf93c517777d8ea60e974699f77a1b187c769e37c15240cd0d92e1a5e9d833e73324cb1b8c170fee5a02c9da341ae09a8a4e01f0fa448f502c0c3afe7e4e3bff9c720593e481052bf9829191f3b865ab", 0xfd}, {&(0x7f0000005540)="a17df022acee196f3a0ab61e7ada472a8b42f2dbe1262db834f7ae9cefd0cad0b9d696621103de3dda4dada3f3319751ea0e1dca38c4084da884a524b7f790c9664b92d745cbc1cb352fe7d968a6f430e1935ca471a261f6074387adf333b969e2a7158fb62617d20b0354e130578be1d01c1995ff6076b58e431b5384ab093f30e7e992b430edeee329b2935db775e2af4c2b49af0bb931b2106e01139ff28d069e90629f4a3ab07c39a7152138923cc4ac1827655e3d0e78ae9be24eae725a7ad2ee86ecb2942ec38a86149fa6c1b30e4be6613fa7bd3432b5de84b63dbad0b860821f51585b5b95df257c422fba8ececb38", 0xf3}, 
{&(0x7f0000005640)="91a2585f2ecffddadbd0cbf7b6359eaab6f865ff7521084bbf408ab3effcf557f88a4fd780a8ee9d7ea8f110bb3dde5d2fa328137b284168d196c9bf187e1b3e5ae836fadecd004c5d41983a5d8f55dff91e6263aba40fed1ecb69ff78b80a638e117b065a44711fa4fcf131ad4adf138db269a0535099724c176c0cb20e57a63fea1d8923fdcebda03f741d96fb7e06a015e1f9b95d01c50f258ad34a041524542d0701b5d1e9de8322126ec6057200d288d4c6c645cdc43fa245872567b9860c944abd4aec534bc6eb29511d8fcd61f2e2ad45704205e573023cf44577352eec8eaabd", 0xe4}, {&(0x7f0000005740)="53d48fbc3dbb9d6e140f1d5e77eba35c9fdfbea14a60e1253016c006232ff984598a1d1183570d99956c1c7f7205f18e91a0", 0x32}], 0x5, &(0x7f0000005800)=[@cred={{0x1c, 0x1, 0x2, {0x0, 0x0, 0xee00}}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0xee01}}}], 0x40, 0x84}}, {{0x0, 0x0, &(0x7f0000006b40)=[{&(0x7f0000005840)="bedc24dba49e008f9578d3c321f1aafd5116ae65c2fda5f7", 0x18}, {&(0x7f0000005880)="f795ba464eb04b3d849e41b6e4c6ba8ae88331d88590d61ffe3420b91bd3f9265dd2542e77c4c555f685ac296e3bd7082ae0e28631e430c02093662b578c0af81bb862630639372b577bbe55a3be34a6438e4254edef392294c505bf189f00623199212f873d54a9d3", 0x69}, {&(0x7f0000005900)="1d03969f0e6786f444440614743c78dad492038d4e7d6de69266b4aa66b47ddb7f06b51e77ce53389b48dd0a7211f43014d8ca872dea603abe32971024817a62b3509d9cc13772295f5792aa14a3fb61d33490e994b6c369a6a913a9507985f2d371a4b3d20e9c9e0f27cdf729e5959f7edf01ccd17c5df07a3370f976c98367f93888f88461fcdfa9772de3eaff44b3", 0x90}, {&(0x7f00000059c0)="66444bd856da6222775540e315e384f358c142f834fa7ff6b54e34c10a6fa940b91e1a5cade9b32eaca4693691b65813ecb9989090e85ef0fa5ab48152ef1e4e87b2be50b3537ade3bde326d9460a2b51096c85d05a73a3c97cae5f7b973b086e988e70dfd91a9455943ef3e618a02b2ca0f1cc8d5a1b6649d38f8db7927045f11ca37b90d05297ff4f2307499c70fec51822858fcd89a17257ea15c0946f7d5959bef8f5d720c7e58e13b676b878ed39e29c55dc32ff91d28eb80796d0aab9a3936f0eac6d85544d90f2702", 0xcc}, {&(0x7f0000005ac0)="e38094f454c376337e8b863126328d4ec1", 0x11}, 
{&(0x7f0000005b00)="548b0a55c94ff39b5893602d102d271c195def40708417b7fe21c2b1167c1d5b424d5d63ad40f474476ad6107bd7850a2ef611b5268a8094882c13afb419570d397a2dbb46c7724c56ecbd149be0ce199524e1c33eed22bc26e738cbfe86a07c83a94fc483cb9ef9b411e868f112a8a4684e056a2943fddcd764b9730caca6510eacb6f278c0360c9cdb7f2d3b6e6cbabf928d67c8448d63a959348429109c644b4099660b5b22e52149b4fe0d8af2adbdebf136e99f88585ccc26d108070ae58c5f3510c66fa03df700c77a9d1823de447c4cab2f131de6fb800a87f005374bf4e40084a2f0c476119770a04aa9a0bde42f0bdf69b9158801d658be9f31fe01dae6292bdee540030da638426c386277a8efd4e7603b60e7cc6d6c2bb43e188856d890865094069119b271604dd8b39085a1a4f00dc2e4f0d61a9f66fa0f8e4973d4a0255f760406875b9eb7628628f3ab48a1fbb270ac07ecd4ccbfe06d5c2b090f5752bcb4b7a6241f1fc1c482e79d5b2d217ca76dcae3ac3ad5b5ce355d81fa42b028e17e84e1f1bae9acccceef9e85092baca8c6f4f8170c85bfe65989c968a945bc136cadc3f460b14870e3a92df5339e8961660fa43711670386c04842601165d072c8d1f5edbc6fdbe29ed067ea349a71a8905498f66ab04d6326ff30dbd626910572035b488683bd5200210020959b6521dbe89fef399cd0b215e05acaea588ad35be98a21c9e64410215a56fdb38141505d879c0d539d0dd80728f2d41bfb523f4177c75253fb40c9dd03754b86d1aeaf3c1580ff235e0c9984c89e575df6e9c91de9e6b5912c83e78e03e20cea081fb0e5a82b3814a95f4b86f67c7fb59e6a4aa50c059b4d01dcec078a7689f850c8d7d36bb6c7e7d0363de2de2aeab990cebd9ce2124e834b518c54408c770bf8d6b262130d0e04699ec623db2818b3c472731a124ae4d42021a0b4721eed496c83aa3629d4aa2d2bb0cf11602cd644f72254005fc3b51f7a59152f64b190657b8f1370a4a95aa6307c8889f0df13a0271231df9df0747698ecbccfe1bfc3693deed692d5acb57d0aaccdef646c40bafca7c044c00ae8a338a95ba4c29d45bc20422a5cb44656479bee4c1a07e7b633c447b0f19a7e9dd5ce3b26fbe66d6f21c62a665d2772475ddbec460129f7594c0a749e73b3271770c4a9b53a41adcd623d5fa2dfa5ebbd0d50cbb5e590dec93615e604f0e9b5d171571697aefe9f6b539dfea5ab8cd8720878b61a0490e14e1a7fa15dde65856839471e0ef17263cb8faa9e90a18cb92ceeea45611a9eb3611730574db7689a68acbbcc25ee178671baaf77e32022ff1fb094008ffce07c2e459c283861dd4df3caeff033b7aebc59f7c19f901fe30cfe8bd4488f73b05ef4418d5d0e975196634352fc4a782876e93f4dd0d826d7b4fdfa367e45459949680825515b8356ca0d66c57b90761c5aa0df445d0dcfe8f81c50a7bf69a4e723aeab8c930bfc51a102c6237e8d1c82f11c50a46dd8058990e94d223262252cc14395b3cb9c69b970f8cb1a2dec6701498df4b22c4f8712a361b34176be95940c9826ec11bb8c6ac7b4a35161ed0f3c8e924a7765448b8cf6096f7e2f1ec0a1d3ba67a6dd3f1c73df2c6e4942931e7adb6720c82ede735e64a8c645e1a2d790c9c0c17874bb7988424a5871d1d465fcaa9d6a36e03cf5e2a0af120b07531b6a973c1024f91cef25be4d218d8008197369757e0d74fba417a435c4002cd211e19976eac67e3c7231328f49dc619dcfff276aec1740bf21a4609acde2192025a04129adb1c1eaeb71bb891d1bd633b8648693d58ccf88855801f83817958cf1a2f39e9701995d6d939d1896a9bceece557888293aa003b0a5ab473242ac077dfc96924363d64c76f6fe07823adf0243e5f02a8184895e72166eed0278e71c51d44203899b72711091fb46995d9c1b8032dac8b968176d6fd2c00f50919d7bf4f7f2c157922856de0a29cbd7aaa42b042d10f481910d3a678fbb3c075a8f43bd16d328729466a2919a76d46d70fb1be4855d2cfe211b53616545509b2426b76544c6e1b2513bd125aab4120629a3b551bbd6ebd90ecaa5a9c898c7c5d7a88598fac183f41f6a52602980dd7ce20f1e4574ac2820885720c92b5e20798f3e7b3b82603f9a4464f727bc0d0c569a070737ac12fbd82594e10deb906c1d1dabd3133492cb2ffdc43166d34bcce7294a4c3aadfceb603791b8417a9128eddf382f9e2c49b916864837554609ef9a2f8c2430f3e5d5e0133bfd232af3f93d007f259b8c4ef850d3e00db53c23d5b37d085d38ab742148e7a9425908ae4bee58c74002a3e186554c7313be11d48732cf1ebafee5134366c46b2ee06ff477ee090cca57c671a6a74d2e76f4d78786563547f74506a3c8da9ee3eeb534fb2a15e42e90b0f01d83190fdca7be281ae9a2fc1136f4f3d711591895c65d1cb424903547645624763e91b74ee71889f
50610086bc6007462e24c23fdddb1c5d147235e85e6ae0e356ea19443d0a80c76b7924b64a377e620cb767b8059dac86b20185eed2372cee82220d2418407cb38b2f99e26c5467da910d9a4f968480a080fcd14fb5b2dd424e6b1df7304ea730adb054e7c072ec69636d157456adcb5e1d7d1b34fb1c5711d9387a1fe276edfb3101514343789211f23841f5c6565b7345cec480afa002c4a0c23e14417d5f72144f5aa4795957a2c0ac0f54f23b86a5907c9dfd79cb7957857a63d11e124df7676b1ce0c50af48f1f22a30aca08d62c58afe3e5199cb870a2d644d0ff480b07b9587b2ffafc38bb8d6ec307c4c797e376a7b4078052ee7b52ea4816f73d087e88ccf35604c916718f7cf9ef2b676d3c5fd609f4c28dcd39dd476b3c68f7827993fb71e79696b0c3b338fbce8a436ed5c11dbb903c9e83ec7d31cb14ca6048eda714abc143fde49b9c231ea78eb4d9f92fe89a124bebf08019e2f43f75a91c9e884f2dc3240cfdafd446f08ff3c304894836b44c9b78a8f06317a354f6d26eac23871d9bb865dd76cf6b81c830b12ff4cd12ec0e8486b1bed4f214950c238b0841b85369445a7fd17ab43b599869167200e242ed9e6c1df344391092da46ac15af337550307909d6b586be23a340c18d8b47731f3a7dcfd4041c60a3e62dcb1377808079c8a4440a952140e8f69fd2efc69ee4ef20253a4110e2360433b5e4e066a3a9afb76eb412b1bf20faf4fe2c4ffd696dcffb812030dc045fe15e68efd591c32a896912f7f9c77735aa17bc37351af23e5af2a59899054436c63a6cd50d7ff0decf0f7bcbf4f20ebb178aac3b8b26ba792206b6454874b797fd1907542a6a306f568243222ea4dc12d21a81f63100be278c2e14f05489ed9b3d9a748a6ff27cfcfe7419eab5a7cacfb8f2936920bdb965ffc2d5546f98dc9faa18bd74bb2456f54852ec5142bc6f31ab199cc40c0512f8192c219cce4c88ab6b984f99b43265da03608ad5815059bd7740a594b992ca78607c978ae31f2fa690aa914c028edc8ab687cc8fb21c5612270a31a110d24c012019cb68f4ccc33592565611e73f21335e4887d80afd2037284fca6b756cf491136b20b6399d15904f1d42f583ab1a5a22caf0003f537f1f19ea3485387585b8441621939ad00c5834e212b3f09d961239573b512607d84bfb9dd2504bb524085f21edf72edf3a25a244832ab4481a007deb2d27c4c57ea6b3647643a400742521d44f8cff6610087bc99cfb8a2c72a5ce12eee8a38d75bc1570f95a5b8dbe1d298d07f667e762ef8a194b811035597aa9eda760f082a7741e6ee2b6a493be793124537c6b25b0aea754362b5c5b32d62c2619c161053d089f3e9cc8e1fae6a2658c198b389903bea459af41c830a05620d65a0e5a4e90f7e751da9609157adb1f3d497b64a420eb2e4a9549e2d935b5557a899cd3a9081cde0b7cd812d9720ed1ba568b1c440dc3016eebe01aa8d244d18c75076bc04b49439fdec070db2e3984908ad4145bb422f537b5480cb905ff70e86a3c28cc20f55ec4b4f28a8f7285325e918c62183ddc8fb172b2b6ff5ed3bfcdd600a8ee00352aa060d3282db5da0c0f1b5df3d91550051c445be5772afd9e44dc4944769bba103800d2168c5adda863bb77fdf7b44bf67255f6ca4c5c8d1ca558fcf876a27a6a3654d6b7cabe7b715022ffe5a82aa49d25b31d10d50314fb84567221374892b9394a673c1a82bf0441167e4b5e906aa303c90ccb6ee7d5268f66cf330368bcb147b2d00f71a6a019063ab9a726ee4b0094c685ba601ab33dfb8e9c37c3e5dc04bb1ea15f05c14ed1771757e95c4473b51da5210a78a3756f5c9fe7db65f18503a703a572d52142c1691758169b76ebe3d4dbd15f4d55d4b141203ce06a6deb3d26e8c62a58ca26093c415f08fc01c34ba4d6ac21abc2c0cdca269c89924f562c4a1ce81fab80623ee0841f9dbe7e4f426d95f48e3277d8950c0f4ab172e82f3a39a18cd16b2f6f03bfd8671950dec0ac22ebf16fbc9e8434e3c4e17fc643bfd9c8d2a1abfe44bbaf0a9c77dc229c54cefd51092ed1db2c669e77f7c5545ede091369671d84fa34e94b0155d8313fc46c5634d493aeb050d1e329c9fbc11ab5eb9c5e148a797c11622e75cea695f2f2c5fcc8d31558ec409dc0772b3c5eaafcb9f34b7b2978e95be78177f9c1f8629d204ae66b0c81dbfd64fb38885d1664e3b241e6872be11b190178d7c880df2495deb2f8dbe6233b52191b776196b7835e3dbe6ae4f6ca2865020acc84e111558c25c270cb9f500e1fa092d925080108dfceb88cf97593baa48b244542e0875550c5082a2790f1f061eea3e7ffefd907b1df212e16cfac7bfc692fd023a675a5dd66ff71ae35d75a90577dfc01333a562d9561257680e231c0e2c66026f9ada2c3615c8b89910ac20489850b3555dc57b7052fc07936d4bbe73b71ec7bd0daa453fa5d6f4eee48729879c58455dc73fb5ffc6d385b50ee7
c68c49d5b17b1b00d013831932860e6f688b6cb1b6e4559105d291ad370adc3d45da0c0c05cd3570348127d005c402c8de4d04df399210a8b2bc67ab212758f40d03e8b6983d6fd7117f77b4ea1e603c6319868380adbf98bd5dd9182f84d8cc3545853c52e9874f293f304342545018d566090970458c6fd6a473473cfc160aedf86b7eab2815c74bdfd63e98762191919f11b0c20b8d21c5907d8bdad0cc234b1db060bfe8b56b4eb96baf28d6425712d4d895cc08e0450e3044fc5abf7000d63dca8ccf4fd34fd91d27c0d572253b30f9b1962e9b94903cf1d8e44ab35216d00e50da4a8f3d42b9a14ab245ebbaead50f34acc9dfeb909ee8d9bda30aa1db78658522cadb6ab34a772e86ea807ef72a14c897edaee057fe6ca0ad34f26b592a5934332e790d1adef4a7a23f25288f03afc18c885b122351c5b7bd42de14a51772fe193dbda60dee7820b4f45c135e022b368556544e1b36cb4459fd92b622538501e36eea0ceec51d2a664985139cd5124df8f018d4371bc873e7f5d9e3e5d74e7e89a58fccb642da52d9f7051ad6e13cacf4047d8f630270fd3eb05788d221a92d8e39d0fdd77eb0c00a493c038145dcd527792a61eb4f299f19ff9578c9a83381a1e4de1e39076a5851b650c051fadf6b5cdd4f5f614bc5a5f6572eadcf83e23d05c43711e99d8d747a5d59d25835bb4dcfd9471fb860abb42b1e3fb27ab1670c36e6313a49660ee0f4a100804be0ef444536fe80cc8ca08985eef8a49b05577bcb078888", 0x1000}, {&(0x7f0000006b00)="355847449e8ca1c2a9e8898c3e898a31f1ae35ad", 0x14}], 0x7, &(0x7f0000006cc0)=[@rights={{0x38, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r0, 0xffffffffffffffff, 0xffffffffffffffff, 0xffffffffffffffff, r1, 0xffffffffffffffff]}}, @cred={{0x1c, 0x1, 0x2, {0x0, 0x0, r4}}}, @rights={{0x1c, 0x1, 0x1, [0xffffffffffffffff, r2, r5]}}], 0x78}}, {{&(0x7f0000006d40)=@abs={0x1, 0x0, 0x4e22}, 0x6e, &(0x7f0000008040)=[{&(0x7f0000006dc0)="0e36e4cf4e414bd7783e1ad6829fb9ace250e78d8f19cc8301642d8bd1036c69f08dc00bde392bbc4befee3ec3bd09a688d20928eb88decb0c2eef59dde5a180210c59fd595b58e74f9beb1a08d8bfa61e27614cb42213dbfb106f1de50bd7414a90202fc33c38d9b0a0d4598c6fce497565c805dfd80fa5784755b79df6f89c8fbc7f36e6f163478c7fa2945f3d30e46e5fc85d338039", 0x97}, 
{&(0x7f0000006e80)="19d213ef10cf7e9fa4a1a2006b739a7957fce4932402a0e6123f5da2884242e6f63b50445ded73228b5731bd10c17f3307c71ad03f9617ba0b37ba2439afbd5ca9cb4f03a71736bc2c2eadddba3c0ac005cfcb031452f828b190787e0269fa264449ea369be0564d523d5eb81072c00c20f42f686cf6fd49823084bafca0521fa02899e05cec27bdc0ea237491b36afe8caad2d5045bac8dbe200533d89bc5d1071effd4fe442f2187d555ac8096d663fd6ac1123881eabb516483a3d72d75a9052a26aa870944a6d60540679a98f8ebb4d0428997fb8cc44e35b630130e090bdb2a10701437d6ef86771d85e90c4d5933697d04f54c90177a04ad45b13ae26404e9f60a6694e11c208cfd34e69f9027dbe16dc9eda0892430765d49fd0a65a37b7707b040b967df04ecf5159d6800a4d8d749080c02be772eabe9ebbf5ecf286d132ab406875158418df9ea37152fc0e3b758a075d0d3511f2bafd4ee9e7107c5395c1b4fc4ab442885890b75747f21415ed5cf547a8e29adc7c1bfd7f8754ea27d8ae5fe66604f6a587fb15c5436331c35bbcf03b30afe8d993b06a735c7d3b23ed58f9da2508b768f6aabf446218bc32074ca8241f379a669786ef2435e70d2791113aa47b7ce8a9fa5ce7b506c907f88cadf566a592b33661e0ceb9cfc267785d4aff387884cd02d6ee2ed5efdf84a9fe819179c0ae0187100fc51f53a3cb2114f1f851758ea14d360062c00d6ce7f0dc1b5b184bc4269d4af1110e2af0045c1ec450b3830a954f4b39622adab39fc07e7d6fab61f30e5e3c505ba15af565b75cd42a59cbf3ead159c4c334bde71bccd515a9875e719b6750d323e9fd44892f7a66870f47077f3c22b1b478918c804aacc811eefabbe838628124eb85b755c92a232bdc79d1981df85d281562b7d16bd4dc97b15206c87247cb86119b0dffe6b3863e7396f372c3b76f542b2f5cda37d41e9e296ca021cee0d6c14ee6c65d5ae560faad98e46b7042289e0b24f7f6501e17d09a89039fe0e5aa26fce6f0db8196926cc7e271a1557daafbf559a89d0495bd90a9f15466cd560467db32f04174974e65e47bd91b56d1d8193285838a7742388a06fd3aa4e98773734a2a9dcf9a5b849bef52a9248a93c49a909cd64d1874b844097fc7edf0b25c6a42cb986640d78fb0dd1a6a5a80369ae11076fe2bfd97ac38f35b339435b5a4d8716b0e35bb2d8b7ef7a9207a6b4aaf4d2cec6af2a00c19308fa18a1ee510b2926ac2c93400c5b0d3a85ceafaa799bc57a4e5d334ca69baf347c3fcbc46bcb4530507e999141715451cd6e62fa800fb3cd90d99496560cae34ba8b8f62dc0f6594b4b9ba06009990ad8ddaf387a5fa83e1930faec4aa7eab8a38023c0d79c4f9815601f2fcefe7dfe36af7fb917dc0b6f6a5018c4274e86cd9d67cb5b3515d82e487294dc7a5d8f87b5b2a8cd5d781558d9e3ef3248b13f62851c064fb2edc0fc4f1992bb1b888cdcf583e5d49f467f388a0332965c3840eb318215ef967b627e6a62f44eaa5363e0cc8e3ccab4ded1325a22503264c268f9817b1bf16a8ffbba34b4422f30ef8ffdbe820c80da362874d5ee42ff01c67893babbdfbdc55c5bad682ce0b701745cccb38a46d95a1e82dd6d1e6299b28e255c459b6b4936f7e55db29a4b9a91ae8404cf4baede161b8fe900fc23b66e29c0aca38e625d708b24d52a1d96c8c3cbf7da2a4317e518ef857eead7dbe65d381b77f584c5e79afb7483c3c096c4167dd3b732751841b934b4650add418af4ab963f504d77f1f0e3622cfa78400f77bd7d0a5490ea9f12a47a034da54cccb098fba8f465d467be3449543f90414a17b9abdcfb69878691db7bdc21471fe2c923591765f923c6ca396d6c1f8adeb42ceea508ddb3237b5d317757d8130ff8e457f40dd220f50a20c946bc654a63c4c9a3384d655a9672c332a92e3fc83eb961697996eae926e6184da962af73aded4a865267e663d06a4189f6fc3a4e69f92bb9b8563c7b78378bb678c5a8f584a361323ef276bff5eb7fd1e8f5d4a0b649223de7776002dfd493caf3f1441f31eee3e998b845c408dd981fc851c1950046c4dcfc877bb451fd05cb71e031b40becac0e0bb3d4e08f6d71475329699705512dc1612b7b2f316efe6d87c97cf466559219f0a306366d1ff2fc103b3a83b1b57042a0b8a30d87185018fdc54ee955bcc64993d486939e5ec7567252953a8c8be1f2724f6d2f3a5f12d173914c3076dee147219fb6e5b23ba2d5a8c7cc384f05f2b3ac4270a91e6881256fbfca6cef7a5673efa9b08aee528236221090548cbc860915bd697baa4960943ee847c2735a31ee466f1ac0cdc62c87863365d219fa258b7193ea0c5366d5b408516f2e8d145c9f9a84e469463d066ce8c2e7f927f965da3bb06d82a1507b6b9c1dba26cfe738b23b95f96c93aa0abc4fc9ef970e33a56c8d53642571ce29f3cd207eb24c11bb2199b001b29d
bc0b88a8df0e945dce6ac16a415f4039c66566a505b3b521970fb1d630672a0d477a7e5698b5a0bad47f52b694217893ed9b27c14a48d15c7571681cdafbbaf29ff7d587e89d1acfa2e4d1c199694c8b395a4bda36232bfe92f9b5a9a4c48560e383d7367e6e8dbb6b8a778671dd2ed7a32f050293401c37b6c37fbe00d35b48311cfb38ad5517299003521cd41f94f29c86cb9a51b0e19e947631e61126e0f23c14ec70502f57b54c7d9b05ac683e97880f99e395be449646e8ca945f571d77a0a84d0bd0646f27716c40ad7ca5e97377065d195e1d5bcdf1acae27a11af3f574931499dbe5f8e03150a249167f2042abf7b41eff4d9d7448ca07c473c7448fc35820fff76ed8bb77db6387c63b89fa1962845e51d49347bf0099c3d0f1552bb9a84e6c89a66a0e53d4e729364336fe91026e7f9aa7e78f9ece080b0c683a462c0d537982df591811527def889f5db7f86b69c5c99c98f76bb0f095a689f8b9fc70ad6543dd288d2c646ad124d3f5e46f510b90b372c4d35d757b1a8cb84a4eca9f3f403d26f1c1a8097428510e6a63984a0ab07786bad3ffd9a726a367ade3b9f2f92e2f9c4d6d8096e88143305140151d987dbc80963696839f2abc821a7a4a6da861bfcdfd5a990a951d05086575f8d6080cd63d53d97de54bfa5df7843bf5bd287db9a29a1843fab25b911cb54d5608d20cd19e5943222fa71163e73ce67f237de784c1578799d44babbe701abb557750725f25fbfd080775e86c3f71b700865b57465b0c9cbd993c4a316392b5a725346c16a066513706007d2292d6e1095d52385f21a62b24fb4b6b7f21d9f8702fda83e8352b7ac332ec2df14b2d09c3811448d46dea57523d2b31f4e6c3bf1f6325a32908fecf8cde4aab484ed13d6eeda7a456542d9942b03c1f4c054c37d9e26089990e73773310165f241bf10a6b7dc1f34d92565dc8c1ab408feff2d73e05dd3f415b8ad06736409d45d8a134d3ec5406dc3a4c01b11e1a1107eb59cd995f604ba1cc05b42e08b895f060409682f40eb561fd66e3ecc0021009f2ea20c2c1478a2e9b313e8e85d0c372821ee1623279222c395395c0bd255379928ec4f609171395d967ae706468aaf2cbe7bb53928320e6ce4d6d5958a4e28e55a2e8cd5d9fa974a2190e395bc0dd6535590758bbbf00421f8097b1e94f26fc52e582d8cd6c4185ec95eef53ccfbf22b3a1ad50911fc1e2c0e0934626bab2598b35467c47ef662d67a5a957fbf0fc334a127d576dcfbf498307529473f770ee7f4d724fcd5d324c4dd3bc675acb3877865b78353d5bf358e21694c9a10faeee71c3f965d5439b8fe699b20eb96741da88a9ff57eb7b210c79d8a744d21765613bbd0095ef7d0789a11d99873c38a6c9422a246cd89fad72938dbdbda23119945ad3e8e09db4863c1cd156cd93f6a587b7d6a413cbef77166c08b1c577072403c3c223fefab025b2c26ffa296fe7e1e5941cc9329e7ac6e19323f0a905716da36841e0916baf695e3aa7cb52a55302ea1281a895e5cb494c1eba6cf8fba751fa1a72bf102ab890aeab62a2c7590aa30e294964b656b4e112dd743fb330e87e2f3d6f0724a1137ef7eebdf8aabc1f26f1cb3235ccc7af35b129f453429446325fc60fb9929bd9e65fbfe44e58a4e12a13c55056cf9205ac63e26908b7a2c63a5a518ecba81fb14428846cd9d7b5cef0fa0b8b878a7a1e17f96a4dfbff29eb5c389e37b9528d8e943a0b3965758f7d5fb442791bee4d83d35ba74a6049b34b84ac158a0406cd5228abe61ab0ddc3f1553595553f66a2810218826a9a1868d486957a9f14b9f0fb30c2a14c7ff806044cfca508a61053019c6ffed421083370efdd3f61b628a2da1c82f7475209619b61ddf40ed4d4a0110156c52e849b54c55cf5d4335469ed7ff9d48d5d48f2d9cf29d4190be4f1994ce4c9f241336fecac84459cfe3f7dcd7a5aba7a7be4942bdbdb4a16f8c2a9b7d20a3d455c17515faead7ca4b40f935c7123d7da283dbf0371ab67b8dd5875519c2bf5e0579637a7cbdd8d094839143be38b98f9d16e458761e8fe0bfedf6cc7d1a4151927a3009278287c2f41f7df79e04b7faf13a5e7f0262ea64b1b9f27123cd827416dcdd97d006bdef1c4b8e94b8fc0112f63a1cb04fbb30af79dbf3162c0bb1ac28ae461dd72dfd7099ebb14aa3e10e8a24479438fbabf0f3f79f6f0251207aaab5891421f7b7578b9b5ae859d5a8b3d8d986b5d30da4a320763ce9eb8404ca000a4f4f43a7b3fbca1dcef7299265d1e281bec2881fa5d14dc78571059549665ed4f12cffa0dc2eda64d0adfb1cf0fb729ec6a9347ce8218626620cde8945bedee6b24fa05998f438c079a3e49113044d500aebca46288b00e4d59f56ea5cd328247212956556bc5ea77fd733de23ac768931eef324ada06c67dfe8fd92cf6690d39949f2df581265d0323807036d992c311c26f2fb969f3be650ecf8eafa16573f2563a2e2285f4a604ae2b0
0f89c980b8fb662baa724213f48945afee7e3b27a382b54814f460489c335cef2b6e94f87ac529a20de6cdb5291293522b2378b369e43a33edbf34d46f8cc748ec94c12d400de20cf7f2fc7e1d5a3ec1960a63287ede22ea8b5866fbc0b6a9bf0e0fc32cac172c8e59fb0d501e13391f39b4eef88c234550d088fd7a3f132581b9a2737b25d4728d4370a77f64c5d5de14e2efe9b5af4ae1437abc17a288ec3a7f5274325307486956767734260dc3a5f4a77f563699bf38201685cf352b73765687a4a8fdb3a0ae8703974c1cf7986c012ac3d089c030b9bb69066f4f5fbcbfa23e760e4f1cdb07af94fa82f2fbcdb527a741eeeea63220dcd360ea7ce086b30a2778b9da1a159abd99a4ec1fe7a0fc72cfe8cff91c75e3e98243d69686cc03e8a80ed93d87da2b67a3d5203592ffda096cc02672abcf502595a9c1d5a6093257e31d69edc8633d96d824e58e61fdea156e08d70a1da4b212b4e218caaceff6be971d59629a8e71a5bc08dfd7b4cdd5cbc0bcecdaf23503bc576ba73ccf5edd01483e133f215b41fbc17bf419ae1676a2344c280439f56a449bfb43d94c1eaa87c7bac96f6b835886e69a32548b74703f4a527f82a34af01c524195021d37550bc68a0014f822dfddf1634d55a73109bfbce6799a29b3408bb6ddfc1dcaf2b45cfd9d629732f5ada1ddf02cd15c3d4505611c5dc09ebc35a0a3ccbafb96df5fc82bf591f9da70784968d445c01b367010dd509d2cecb07bf179f74b6cf61a7e201e457200e80f", 0x1000}, {&(0x7f0000007e80)="eeaab57f845ed24e38e1738779b9847a09a5526d9a0348a299f142999dcae6c4d19012c19ff42c6d3a5d8f516b4665f522843071110d74dfeec1d371494f2554c7d7a1105bde75f9bfc1db17e3a2660604a41334a3f8445fb836b2039542ea890d95b9d022890079fba8f77a5549002f95ea80e0f7020b64618c35e646ed5c401a189e180db5b5eaef", 0x89}, {&(0x7f0000007f40)}, {&(0x7f0000007f80)="ece25ae9fd4a82800376ff97ae32a4eb2aa35fa93c3d7009f9be16235a48f368da6ba2607a28eaac33370b4d0db6f6b0323641c1f2a806974ce65311e21438275ef492b4bd96bd977a937064080713ab17d8f2c8be3bd40647822b2bb812908ece370a7134918cf8b4864d51cc9b387e89c5144304631ca9e3efce3231a1cb0496f83e0a723e269c495d87fd4f8f294e7efcfca12023c2aa8991879775b97a9e47af4d709875a91ba5271578a5ec28e00a", 0xb1}], 0x5, 0x0, 0x0, 0x4}}, {{0x0, 0x0, &(0x7f0000008140)=[{&(0x7f00000080c0)="32d2682b5e5e6d13438683d3fd48b6424a463b273a37120fac6504852be18154bad8934be23bc302796c1ee3e8705bb7eb212415234c7ecb0c71143c6caada9958a23f59e5c58d0ecc4cd34452d43602facab9cc", 0x54}], 0x1}}, {{0x0, 0x0, &(0x7f0000008380)=[{&(0x7f0000008180)="ccb2eefb57d1df2e07193eb8480dfe377e9b44b4e4ce855176f1cb1ef85c3c454377fb92ac760f8ee09d30cd521555be943b9a1fc190491d292ed608eaf91614aff82466ecaddb10ff14469a904796244e98ef82f21b8cb1a23e5149de2e2177040d26ef5c85e3de35b9bf1a4746870127de43a8f4c3ca9a48b74488b9a61d26ba842fcd1b39f892807febee9f1b8a6108ed02e2728d6f68a70885876c4253022820dda1d7564e39864f8da322ceafd0eee3e4bcc113e1dde2cce692be644685c270c6c54fe179a8d115574ffef6c7c47f4c3b7ea9235da4dbb72fe7a77b7239dca21de044a946955e8b40ec60d60fddf32a", 0xf2}, {&(0x7f0000008280)="16faf5206b83c50c942069b7d96801cf5edfeeb2b01774fb72a254870f9c7709ec8bb051901ab8b4f105798dc79b744ace4e654879fc2f808592228b30d43cacc514279c95531bbd8bbce713a3ec87021c7a4668abaed97fe991c54654e44f094b56174d701e4059a76a", 0x6a}, {&(0x7f0000008300)="00c9ed957c06da8dea8581ea7b2e2efbbd59259d47d5873b500b009a5769166006a19d86788731054fbe1b2a0b262ece2f47a50e01b7ce55853270cb474373997c", 0x41}], 0x3, &(0x7f00000084c0)=[@rights={{0x24, 0x1, 0x1, [r6, r1, r1, r7, r8]}}, @cred={{0x1c, 0x1, 0x2, {0xffffffffffffffff, r9}}}, @rights={{0x14, 0x1, 0x1, [r10]}}, @rights={{0x18, 0x1, 0x1, [r11, r0]}}, @rights={{0x14, 0x1, 0x1, [r12]}}], 0x90, 0x4000000}}], 0x6, 0x0) accept$alg(r2, 0x0, 0x0) 01:55:32 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 
0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x116, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1983.029683][T28468] bond1027: entered promiscuous mode [ 1983.042030][T28468] 8021q: adding VLAN 0 to HW filter on device bond1027 [ 1983.117674][T28470] bond1027: (slave bridge991): making interface the new active one [ 1983.130291][T28470] bridge991: entered promiscuous mode [ 1983.200369][T28470] bond1027: (slave bridge991): Enslaving as an active interface with an up link [ 1983.227661][T28474] validate_nla: 13 callbacks suppressed [ 1983.227680][T28474] netlink: 'syz-executor.4': attribute type 1 has an invalid length. [ 1983.281991][T28474] bond365 (uninitialized): Released all slaves 01:55:33 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1e00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:33 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(0xffffffffffffffff, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1983.393171][T28479] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 1983.429768][T28479] workqueue: Failed to create a rescuer kthread for wq "bond532": -EINTR [ 1983.480056][T28480] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
01:55:33 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) accept4(r1, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r1, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) setsockopt$inet6_tcp_int(r1, 0x6, 0x19, &(0x7f0000000280)=0x3f, 0x4) r2 = socket$inet6(0xa, 0x3, 0x20) sendto$inet6(r2, &(0x7f00000001c0)="73fe3a96339fd1ab0b0212f99a46a20dc4e309aa2fec8573556b94f492bac75bed55bfaca36cf0fbe00c0a83da6ff91584ed0e5a9d093a566741a5ecdb51759c4ddf43b0f27ecedd7ee398e55de82ed24c8ea6735b2ae9dccb8fa82a088e74", 0x5f, 0x50, 0x0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket$netlink(0x10, 0x3, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(0xffffffffffffffff, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(0xffffffffffffffff, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r4, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r3, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) pipe(&(0x7f0000000100)={0xffffffffffffffff}) ioctl$BTRFS_IOC_TREE_SEARCH_V2(r4, 0xc0709411, &(0x7f0000000140)={{0x0, 0x7ff, 0x7, 0x8, 0x40000000, 0xffff, 0x7, 0x0, 0x4, 0x1, 0xffffffff, 0x9, 0x8000, 0x7, 0x1}, 0x8, [0x0]}) socket$inet6_sctp(0xa, 0x5, 0x84) ioctl$BTRFS_IOC_INO_LOOKUP(r6, 0xd0009412, &(0x7f00000006c0)={r7, 0x4}) [ 1983.535592][T28480] workqueue: Failed to create a rescuer kthread for wq "bond532": -EINTR [ 1983.594322][T28491] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
01:55:33 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_READ_VERITY_METADATA(r0, 0xc0286687, &(0x7f0000000000)={0x2, 0x8000, 0xdd, &(0x7f0000000100)=""/221}) r1 = accept4$nfc_llcp(r0, &(0x7f0000000200), &(0x7f0000000080)=0x60, 0x0) r2 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r2, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r3, 0x0) r4 = accept4(r3, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r3, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) bind$inet6(r3, &(0x7f0000000340)={0xa, 0x4e20, 0x800002, @loopback, 0x1}, 0x1c) listen(r2, 0x0) r5 = accept4(r2, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r2, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) ppoll(&(0x7f0000000280)=[{0xffffffffffffffff, 0x4}, {r0}, {r1, 0x5064}, {r0, 0x10}, {r2}, {r0, 0xa000}], 0x6, &(0x7f00000002c0)={0x77359400}, &(0x7f0000000300)={[0x4]}, 0x8) write$binfmt_script(r0, &(0x7f0000000580)=ANY=[@ANYBLOB="2398769586de1813916dd5808c2520a967acf8ab1d04cf15fdac9483a5f76a4b263c2f4ce6280a3c2e300a00092a7c2ccb42c175adf29c0000000099951186e7e151e101915a0871d2d8482174ac19c11c1689efdac0dcb04db268af227fba965df4342c888f92389873e846401662af37f6edbc380b84f404d61fca1d0f2f4325867f7a4d998b52040887ffbab2b1fc4df5d3593d791a8d6b0521d97102c33a4484927d3e7af3c7851531f8f92a1d5305f96240ec65577dd72d01b2f988555366b2ddb42a57e48b078e34c03094c463a603f7a28c1f58b9eb576088969b10e3113607b1dad3"], 0xb) r6 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(0xffffffffffffffff, &(0x7f00000003c0)={0xa, 0x4e22, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) listen(r6, 0x0) r7 = accept4(r6, 0x0, 0x0, 0x0) connect$unix(r7, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) sendto$inet6(r7, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) connect$inet6(r7, &(0x7f0000000380)={0xa, 0x4e20, 0x4, @local, 0xff}, 0x1c) listen(0xffffffffffffffff, 0x0) accept4(0xffffffffffffffff, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(0xffffffffffffffff, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) r8 = socket$isdn_base(0x22, 0x3, 0x0) r9 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r9, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) sendmsg$netlink(r7, &(0x7f00000078c0)={&(0x7f0000000400)=@kern={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000007780)=[{&(0x7f0000000500)=ANY=[@ANYBLOB="100000003200ff04000000000000df2544bb125fc6c1805c5a085a3ddf0d15928b4530b24134c6b654d676b9b40ee120e47162ccfe0aa1542b60b68cf3b504b42516e19e0882aed94e605c52cd5f8bbceefa5ac3c20bc32afb238930d6b1aa0a80d386bd358faa2408d81043c28322c9556c3734f360f547092f260e97a119fe"], 0x10}, 
{&(0x7f0000000440)=ANY=[@ANYBLOB="50a005e90c071c51d7b6200000001500000425bd7000fcdbe7d2eca01bd592f93e00", @ANYRES32=0x0, @ANYBLOB="520a48b39d9173d5970c8f1a70e859cbabba9a79f80f467292bdb3b797fc7b0dc2c03831306581f910ae4a5115ddf7b676b4000020fe4cc2b6a3868ec49c50d8d4a4716ccd95746fe69938e83a8bb968b884364e69ba8d608930b0aec389f9ae23efe673c0425151a3542329847e98a065c62b47f5362c1923f68a56d8f5a625544964e0dbd7747a7392a3d28cdd688f"], 0x50}, {&(0x7f0000000680)={0x3498, 0x10, 0x400, 0x70bd29, 0x25dfdbff, "", [@typed={0x8, 0x78, 0x0, 0x0, @str='*}\\\x00'}, @nested={0x22a0, 0x24, 0x0, 0x1, [@generic="c392d28a68ff95d73755d3eee054becca8d740d8eb26c2666ba30490e1704b5c5e4cf81f74fa893109ded1234be3cc913ac896cf97b6f8615afce64200391aab5b63e19c7d19d125dc0b390e2231e08f13fc9e442c6c6b225f9c3612e8625d413ed74976af71be67b9f6bb16ca5eca0fc2507a1bf10a9dec77a7c12d3b6ae0e3a04c803ffa42dc412a3611400636fcd776d6cb35201c", @generic="238c44509c16c3c0d7346bc923f7e05e03a5a8185bed6227823e798f3fe2710cc8c6158b2264128376ef19154fafb147ea3ecbd46e550afbc5e6f1cf15d244438ad7b86a51261d7bc7171e827cd722506ef3412cefb1c9f4ecf5d602640843e8f9bddf1894ba946e900b9f4176cd5442cb8e01d00aa44131c82daae1414c8582b16bc8dffe579452a02b57dc54ca6874a7e7ebf1c0a822c849e4257e48462f4e4c29131383a63a4b34ed4c9b3fff45818532c0bafb0ee1807f8ca7b7e06bef3a37aa77394a440054b4845b90421559867f4caa9806ace11771254509c322bfb3cf509611c99cfd01296bc580ca6e7954723e89c23ee1e5da1aca8bb401613e4a10c00d4b0d4238027fbb4b8905c529e03940a266d0af3f55f44c75d22ec33e56815379b0b34c68f933215ecff5f1fbeaaca6f136ed0fcd4fa635148d2d9db8b600f3a2b464d6d4b62d3a6ebfe668dd8cf628692c40e932ad9538ff774d2892bdc94403463f0cf73b86aaa98d99608451aab9f51049fa3c8d7f3e55f05753c50bb87f5d957b1d13687a1c1144846c9962bbc14c9c693ed74949c935f703a144391e797ad0575ba7391a85117ca1f249dc97a7c418b501928430b53d1bfb2d398526b37476f991d1735309f4a74cb7a2440e268acbf716edd7948c708153a980fc8a102e1be3630a2e10919a4092f42c9d85f29b7add5bd7c697f8027c782cc8753bb43fc3ddea3d48bbb747a485273d49841ee33f2c9d5535a1c4e7b7f42824c5174c2804c411ee179e6afbe691957c3412981dd87ac20eb8744e13696de169a3abd9e25b16d1af19cfe62f57696fc7efc840fad8b13c4caa82635b8b8fa32e00193b78054304aa0ae20001236f2e879662725c5e33a2347a7aff39e9700b6d4c18c04417755b7db27c9853a5a0089baf4a8454ccea8618d94e6ceb2716c8b0243a742b9ec386ba9e60684bb2cf26f2b32d60a56a812150932ff0877584e5da429976bf59d24a70228fcff7b3e9506f051327847e1b3bc399a0fdea35f3d23a54b0d70689228c698d02ec68c44cbb1d28e37602eec3062c5312350fe386c14278c698061f2017a60bb4e394417bbead4ea8fb6f3ad1be6abcd5636f280f756275f3fb582e842cf109bafc552acd4eaea412c2e42fb0a6295f3de373be0850484c85a9a9e77478c338233aceddd8e736dbe1359c203dfcabcae35a9a31b8a7bad8b0d68c071ae93ecc65cdec6150b9846741bba249b583599e5757d32fe6ae8508e9dc2210ab9b9b679cd9cf2eda6bf619f309f6322742aa23e79126814258bf9ba2ec38b8a2be3c4614bd2bd28ff454a74ca3a06c87f6344fd65fe099da61e3bbf830a7e576795cd63dd97e21d6083685fb8f8608530fd8ef61e5c04e06ae5c288471bd009c2ea33db5b2416fa28c35f515fc746fc8f9d5b35738e36c6690f53ca15e46594a6b3f47d924b1b0d5cf9ed91f8bb0bc21b39bd7c1f1fe8f6fb543d3021c7268cba231bec6c90abbdfb2ba709c7dc64f44710486962a7ea8d34d4a39b440ddc893bbd439f1d72405b334edce3f212387424c3f5b3126b64709a05c718f21f87a0a7a4567812bff2058f6f13ef69316b8a5701edf914d85e189757e9cb469bf665ba39af808523d64c463e397a106b49dd2ba5410b6af423d10440ea2f3d6650d03d65b7a0a17e88ff10a4787db9cc5e9f4ef69bea96f48bb76bdf46de2c5a8e47031001650d8834fc861f7462e548fda87aaf3d7d577c2ee2c43554e4540a7fc63d0c25488b58b6056a25da6acd83da886b508f1a9a1da583753b16f6a419a37f68298351ba3eab2925843229d47c8c87a9afe6f3860993595b012374fdee4830691d62ba6b8a4e7dfdfba
57fb879b8da38e3cd2bbb1fffe8e19066a48a206e8dcaefe0672ffefca64f1762eafb62fa1cfc00fe66e00baaac7f5adb4664541449ee957f33ec5683105b7ef056dfe5bc93875371acbf0530f95ba6ce5394fc19ae5967ad8dce677522f3719915e9899700c8403d7dc7b1146b40daf69414bee291596f2774a848d1a24461ff0d78ff7a309afa13fd5a448a137abe2f85e67a4e0d333bcce8b95d09874fb253529850e5d67cad0576d230769fe4d6ae02251653b7622118fb0a5a59c5a225d49583c73ce3e0ef4a9e6ae37ceb9163d0093e590e067c2fc1caed574694c8bfafd130bad222e2ad16f95fb3cc51376143b53442c3e41d7713c48b6816f3ecb350ec6d6e63892de8037106706ab953c073ec8b88738bc47ccf185fc20a18ac40f1b66bc12cee2b161553f556b555a5a2d188df60f7a0c0f07e80a20b0907a65cfcd3f170162f73ac018e43d55d7e7488bf122df67266c6a33fde3d5ac33c14f2b0862ff9f060bea1c11035a8f4ba07a70cdd23c612d2ce0bdf4af0081c57ba2dbee8a64d2307554f1249d00cc97bb81b8af11d009021d4e16226ad4c4a19828ead98b74204951f2134d1b1077590df397d99fe9eb690002c2d8aaab88cccde1e50be7d6ee37f5ca32c73ff2e3b5e069c5501fbe4977a49a88fec1cf3d28d8019d189d59cf85d0d081a58bf791eb2741f9ddc2bbef1e542f402247184b04270bbc4290e31fb3e935ae46b30aa5aeb3931a7b4e6dd24a09543308ed9bc75d4df3e2b41705600deec43976935f5557f53c34795c01f408507ae67cf82908feae9e7cca6055be8a150a7e4049a45646418d9c1fac249b07f3cd6567d0892535b6a19f8678bd6d0bb5a83defaee3d43b4cc8b974ab69e87bb409f18f10f224f33c16a8208301dd30ee3bc72cf46aaa49ba28564234cca8701acc4730ea0532d14eab8626ae6d9a42775569e213bee3595b040d24284d0e06e0cbcbef349363a6912685f6723959f8ed03da9e54bcf13cd97d4a2656d50003cde7427d4ccc0d07fc005a9ce01661532645d95da30a7175d4bbf9fc804239a3b88c9e0c3b5c8f06e64c762362d11957e82486ae464f82495192ecde94f51e95d6fbc45eec59a7ffd110dc73b7c13b1159fdfe19b7246981738af4769aa8e5c7d0eae0e84c08c9a1e3ef3947850a24223322e97b7835ef1c74c3dad00f8c94ff618647c6f6ca149412769b63f6935c2de6740575ced735bb71bee0673f00f62952819ac3ecce855f005ce32030bd38ca2c155f30e57c487e4d6a2f3516927ce93ff624d24ca5fa25ae8af59659352e9faadae52c6d3fdd1a1ad99da806169406adab524cb8c0f1a5a9870d7024c75419f0fcdcce49c3de032002f35732d46785d9abf8899ed6816cdfb56cd58a29067558bf0cdf7d1863c889b5b06c6de9187fa13d68443268ab099ebf948570a18b8a95cbdaba1db2b59dfa7e26160c4e3a1a59aafd3159ce1b4f2b379dd4b5822d136009c30451cf0f87689661fc310138e0abe1fcc0e9f44d428211a391d0692db59751906cdfddf3f0e3ddcdfc037428785274453193c43a172730066ab42e407d350fcdfbd19b79c7282cf2ead0c8dd6582197df211963582af5473f86d9f0de4ee8bf82e2241267a9b34d6402225c689451292585aed7bf71dca358f8bd51d91f4be366d77a3eb3bb24ba9a2baf43d226d0f42b6b5937594bbb9601666732149afea9b01308cfb3e6d62fbb9da0c420923e18bedc06eb61b1ac9227759100455c1a1d7ecf4491f3c9536f8a0a132992042a0eff7e5b0eab27310db5ed9072c90d40a1f7c0316c2070eace3bdd37ea76b5660ba0e4e805947c4807598506646281e344952ee4b0bf5cd755df4aab586fbfba2860fd05ed70c88c233411392545f25853fa0ca5a6a22fd7c0d24d3090de542365dcbc86ee4da3d5a0030cd1dcc109cd8228b80150d8ca9130f9a60d2c41779f96eb955789b4bd8059faaac0cda3763813334177b18a90da3cb0da05267ddcfcf734436ece3096ad47c5b4a4648f9ac2f8d0b8e4909efbaf79b441c1fd57e173c842c388b37fdb24dae9f35172223977285118eea4ee6539c219f165d9c73cb49ae87d0b5ebfc9a51832c7713d76f1f8312716909ed04864aff390ad5fa444be5732492ee6ecdc68bcc51d6d0d244fb27cc5d37c3ca048c11b9e1746d0dd93c2657d73cf4bb6b8e1c1c078527989868322f04d12d84794ab7d7a86cb7eb1fd316bee7286fb8eaab394ae71db02bdeba08820428342760c1382ea02b2b11f69d8eca7208756408e192017ec55f94a2b11cf17b7d7e0cdf5963700c7e073726bb9a0a7c5a9c6538c346e8c51c8ee624203cfebf89640004e13a01d0abe5ec851960411d2964be9bbab1349b77cd08ff2f5f3b522229ddc6e738d080d072be9a6df16021cfc8603120c0614f735c9d0a8ee43756c8b153602aa5387f68127ca53d146597a7c36dfd3bc6948e3f24418affc377d0c9f62f86307091a55
23d9c1c81b1db6c5ee4000acfbbd98b0e2766ee028152ebc77e2a2119702d874613f240543dde82f0ac5786beec271f5495b7bdc4d7278863383328ce8ad6f713c831bb7af41ff07288489e436c2f31b8710cadc0ab03b23047ac4d20a3daec1ab7d16d7faaff68b413e69b719c9708a758bce501bc0ace88ca7c72b4c450d5fc6dcbeb036b2a3ffcd6c6d05fcefcc073c52f555a04ecf8c7659133ae203b927aa389e1c7a74cce1e617088c7157b14630044120b58000b779386bb17c8d608a584b24a368d2a04c107041a20e7689d9d9c835b73907de136dc540945305ac22137731604fd9e8cce64fc2c65ca4fe48b424ae02670497c12bf6c7ba006d434e1a70110597208aebdac009a5aadcbdceb9a37494b0fbc69b568d4ba016a00f5aae3a57e2b87ae8a77ff7bdd6fc810629e69368ab3da4e6c6d3e92aa23df8b3d7cf1098206725ff83d4f34cd7a51b64d0aeb37503e8c6589f95f8ccee843a63907b2b6589234cf95a19f1573d9edf4b0bcedfbcc954c07362b7f1773ac2a7d0c75b531f90c599446c343360b8fd1ccd5fd9506331d3270fb6c81501088377769785ee5036b18eb14687f503c6df9075942a58244e94aab740245c132c1f6b5a74075f6e1a0d6cb0881dc9c684e19469b67392763662708a883cde65d7e91f4e7a5d5c23ef57e0cd77d9c1517502df57750075c945202bc27f1ce68de66679bfbc978c86340678d73e1d05a272dd5a14b02e9a6316f95c52bd89a73e7d22b2d9b4fa935a53bac308c32169af57a63b9735248ee346362dd773cca5fa682a74778751b0e749f1871f62fdb3ccd71b76ecb61be7c08467e4328ee6dd5653e0bf6088ba708b9021af9c46ef96a5b8ac2ceb5606da1b9d9e4209713cbc12fe306743bc4af2d500a73cef7e28305d65fd34ad2a22c72f6d134e1f87ff66c049565c9e09e2e5081283d22e383b520947db76830db840ba6ae6ecb1c5c15cd71df47e52211141301eee9b26087e9e033ce59fe5dfffd5af89eaba453d3fe4f512c9eee430932252acf9066057f499e452c7b5267acd52a1c43e177d43d0ecfc4378cbf701adae9243b9ed0db8862572ff4e27cfefb0d8ad4bb40730507678c72ef03dd46d0b534ddbe9a9a8732dcb493cf3ac9cb38d205a935b88d67688620df25d526daaac65ef23101efacdb38f7c470d28bd3bbbffb35e4e89584b69c57ddd445ec507263cfd67b4f2d00d5ee0730120ae8e1a3c2f053c8be84ae2d42b36cc68ab8789805eccc40886f97968fa830f7f36913a5fa39e02a58df3c36752e28b5eb10c7c3abaf7969302c1f47b3c4e6554cb2d3cbd67e8f0d8ec71c43108409d723f06fe291dd9eda7e046cbc7288a65ed4f8a10f0ce0a84df61fdedf9f5c8c5d4c6574", @typed={0x8, 0x20, 0x0, 0x0, @u32=0x4}, @generic="5d408cd386d05a267ca3e4cac691d896352ff5e0538d4b3cbd3fe502159d1fd36ee713d3204a941825a989bd73f2ef2baa8649f5e1a3cfdb8b369f4580f7189b04c7", @generic="18393325c3226ea0fa11fa2741900a047e9d710d1b4d76e2075bb1f4ee29e17a43175e2c0ccd544b83223b9d725b765511c384b27591ee3a19943f6b417a059b48d6da5c40a5378bd8672b00f94ab6c5c7382ecf", @typed={0x8, 0x1b, 0x0, 0x0, @u32=0x100}, @generic="d8f99c4f0f1872439fe024b672853fb4f5f8ad591b8c1d1802f24c543e7bd38f80aa07533515ad6c0ecba5d950dd844ab782fb1518387da16bacd310bc503c5dea1b1ced0162e475bc60a71f07a08527c79c332a715e030e9f43ecd9121a29918f8080230f88a664d7dde4d3c04fd2", 
@generic="004b52f520fe2a84a261f616299e0dbc21479bbd017d6371240788baceeb8dd74fda1cf330f5fc797a79a878a522e82b6bccc267f35e66ae772bcf44cb1754db9e16ce19d443d5fdcc8b6851cb2b9650c5c9b143d1730b40a80ac962d658de7c107862dee283b38790bc61a36ef652d905c8ade14fdca515250bde0a90819de48e61b33ce6a65d04f55aa9d120796b501fcadbe459baa6fd6312279e29e953c08f2005ac6f4f17e5cc7429d55516304525ae2e09944e2a0cdb2ef8006b27583f2eb58078c9193299841e7256c9518b3ff292aba7e1ad985f16e7b5b7891d8743fb592477900d2ba8716ee2b350ff91f7c952722422ea64ecf6c0a4e899b89f1e489c66200cb03cfbce54b6512c701205096c08c47dc75dff7c447003f7d1ec04db00263b1a8edbb0d7981ab9b0363cc0c9b77126fd0f18f5ee6652b8dfe88028b83e5392c119da099341f2611d614c84d17e695b64100014e3cfa105709e9a7d6499eafcd8ea99b5c9657ebc51020365c5065906aebd0fea26d63c59bc09fb191c231dc61b8201693794acd81c8ba9df74986532da57b479526c3cb02dd16d597900451ffcb7e6b3cab85eabc16586c61c05761770f3028d76bbae426a948171bf2b4976df6eaa6ab213b9d0689628518ae57b47b1f8e9b1978fe0f9901981aa3f9df4a0dd0c85d7ccf5156926621724930b7196ba30ed4f5fe3f4226846f83e50e4de903c24ddb041343b78781a62a6717eb2138c1a7be2d41e313b9ce3f30e1a21a58565e973310f3895ad61e2ed08f1dd620bb0d6ee83cfde39a0e178d26feb3a1675359d5305cf01817b4a6e69004cc7c6faf799f7bfdd5390c499a4674dcf24748a5a8f95a376dae6caa47e5bbbc5ff56eb7a3c5cbdbc521f4ca7dfd95240e2f98afbaa378a64cf4ee27ba53566e6de8a10248131789406d877ef716748f542b23975b0558004babe08dcc32233232b44169207f88ebd941a3403a0e1973e7fe4589b237a9249d75f1d39bd6e873a6d0c98528583d076901e4a52067ab6b23e2162ee0ffcde0b40610072fc6aa0751a814e5b8409e8e9f3933f527a23a64499b7e0e6d215bac488b94938988b1dec7fee73de6731b8de3935a6a0d05a915cf1f77b2dbfb4ff014675fb8d2722e5a0c6c98633fd498918c5f01fcbe926e8611654f318dc7f5ff1b192899f726f9cc15d5f385f998949c574ea1a34e98cb19db2e8d2aa3395c51494a5bc34acaa71bad8e8f151b88e11e7e7dede9b908236bceb3b717956ec3355d70cd083e39fa30ab109f79914a025f1b6872dcbfcceeda539714c29ff8bc4b98768f6d93cf538c75f9e174a76c53850a5bf2ea7764186cc0d281c012bbdc2f43b506c370605a79d98869215ab0ea16986311c2634d78aaa5733281b53eebb6422693ad5fce5ef4f1871fcb9a2884e4b48f18ed1e056e4a1ba397d064137d4a0479f55a5d84040acbbb027d3e95f8b7672ca23deba0a4b4835f5cee288d2e3cb980a9e3dc822e576d839a87416704b8fe13f06150ed262829567a792b8d5157e2e2e1681e30db9f3d2ac0f23062cde43e6d4facf97b1c080a0ed9c5d3c49ca7e01ec6235978ffe3050c675ed26156ae446cf419f28bac5925f5d4ddbc5ea9e4a7d943fc0889974131ef7eb4868d9bb118e77ed4855538ade21263e95ec1970277232d19c34fd1ab56c3468e620bf69174a4b019a418ac55d3be9f8a6df193b13376f63ac0232078623f8a3585de97a141488f74b105945899723813d4d357270b768adc15b9bbd69356757aa4ccc83c23d9a58ac3c44da310151e6ac0e238c090951cd52f413790fe5d1cc85ffd0f568bb7ca2f587707fbf90134c4b4e7ca863d59003e04c4244b855ce475366584874e9e25da5b87ed423983a27b770f57c8cf0e1b2037faf477c04e4c22b9ca1be09126e8190a48a5253abb293e138c0f066fc2f0d935ab59d3f6f14cb048ff42704b4a78d338f28ecef27ccf334a12517b159425c69c20d5a8b5ce5914b244f02643a9c6028e771fa8a681110c8c7c8ca6da4181660b8f215e73d5a85d81f36dee4118d66bb29e69bcd9d3ede9c7be5d3fdbc0780ad45b2449d452c3bb135f00122f2150af42f39eef3c19472f945903e5121e40bdbeab364f7938ebd95de722629f8e433a9904940f7da8291573611860a2a337495a2750b39d0189b5d71477431c838f843937ae0873802d2f843c1c431b927b3a9fb2e0d18462329f759efe52e94f1b638b7d6ed9beb7eea9b4221bd6bdd24b8598aa7cdec9594f3ca3fbf40c57d7eb39259635001b681ea0363704206a868135ad1e4360d04d2867d40c4376331200ec89da5fed3353df654d672c0c00e53e55555749fc429cf4009a5edb4f8f160576b3b248ff0f203774a2139ce3192501d4336a065cd37773516f4a50bb1419ee7ae117973b6d5d9b8c221eab5dfd174c2fd4a2716eb93122c04b2e5ca1accfea6a8670a45a4fb9307019bd43b7eb731d59
38da34c0deaaddddbd200a2cdc80e2c612e7592851dd123aaaf4e9a985bc98068a6aafeac2b4368b1169f28d0552cceb51f67d24623c8bf7a76ad5baba02ce5fb769a000480db5b30e1bdd6d9b9b073c30341b8a43bc860762ccc5f9c9741b312d37540220342283767c5e4fa0636d0f59d65d9cf3e7c4102edcbc84a164bd0a7a6d37336d38307e9d458d9fd824b5d9aca50a4c720501ed4f9e8280cf334015a0ae25e54a27d915cac1fe7a9a26bf89dd3299837fe5086b1fe217aa5d9a363d61a83a194d4b42f14ebdf42e19f14e1c2cd88f41080cb586ed7e50b08e14f314263aeb9e2caa1dd47b649ee6a875ea7512636173db78747ce2ac997235c296869b5c295a1898cdf2b509e38761b175abc04872db51ffa75a4a39d6d46e5df6fe553445dcfdfa3aa43e08d33ac26c184a94f41789ceb0adfe397ea9de5d7d6d91368640518735f7af46b78cf4dc0ab201434648ec299f3d4a0c1200ef7bdc58b5d7767384b33f95e3fe953f55881cbeb435c146c1aa494995e48eecf0f2e7bcb726aa99fa9e8d9d9b34a61fc91820e8f6421875bab39a67b4aab9989922535f559c9d769eaeb4a4778c4de17bdfeaa6db831f77e49d28c18d37e6a8bc4ce5ac48345a1162dccf11ac5703fe7f8238d36bc396b855815bfb892842ee5eb49b36eb2a70495f8e3074899320e493c28ef17cfe76282ee20c1f5b4ba7709fcbe6db3fcc6c5fa400cf790ecbcdfc18883b394a48d45b287a401d700651fcfcbc7385b905d6955652c54106a49af4eafe5cd8e805ec6fa32a99765cb2d966d0420e8bfcfd1652c0caf180e91c961046ae47e7e411dad0801ab7a6015c8a7f6877f963eadb88072786f76cea7a063edd706ea26bad209ed561d56a30d8d83ec5021023391ee5ac1985e0c46b0fe71be253bfc31ebda3df0c372a7544f60a50c9dbe670905ff228cdb8694d65c088172434398f9f89b817ba5b24cc07444247baeffb1bd412e711a818cfac48098636492fa9f5062e81e7154a90ef408913989fd43f434dd90aca158db1da88cea5e2c6836a74e38034cccb64801db41815b1b3d712a4921b145b6246bb11ea2d4a121ac68b60b87265efdf89193a88f0d36849b2ce789af272fd9873bd415c2e0a8006fdfa8fb2918517ad63085c3e84f54b82ffe160ff22f17a9c5d487df1c151b01f562c839e606e8a3e493f6e6395a608560934ce83404b34f928e8ba6b1b4011d119fd9524335e5369173e51a72440eaad3b775cf7afe30d9cdfac79a849ac02f1212a6ac82d366f7ada23df0de9a9b5796fb1410994fe0556322ec9cc0c730f3cd48df7c3d6a8de425ce5b35977c17783008cef67160d3cdae9e108ba9c73a7cfc79eaf519569b8cbe1626590c6f6872920dc512ba0c87c7592e82c13e6a8ecb0c0cd2585664eb57dbfe5668542674f0519d2b0017106d36b8008f79a01f991819e7dee37a192f8f88b184859d0c5f65ef072d3950293d8bc9eaac5919cff52ea82862a4c31aac87ef46f4669af0c69349234d5aa22eae4a5ddd781ef5f69990e11f16e5f754b205c987ca3c99931f18bf6216c500ab0128b0db24e2a3181ce9091428f1767ab63af493658b0cd084cce44afa6ab0ac8b4b62e6834f632293bd90a145f62d41f96282656093c71a08c550fad2767de8ff72a6c5b1cfba3f9f053b157a562e4658e100de82026567beb77263878d34bedc05549c9d02d6fb6aee7e1a2f89c97b4d2aea05411e192db38b9efead7ce3731733eacb5cd6587e2e8dad8f3eb1ecf7fc4a3a0183d222e4772b897416d85779f1c7de6fd4b270d598b733ea97239b998c12e904c103bec49a4718cba42011ec33b01b8375051e0071f15f8e578f226e9b9235d7dfb4ea5259f2729c5c40346fbc04620def8051f81d2ec9f171865eeefb64925fb9b9a05111d6020b6838973d0ad6717ba56d103ab4cb80766d9eec5ea8fab94383b4f0d99db716f09ed818a5543e76c8a9286bb4d1f3ab8c4624820227617be7a96c9fb8dac2bba6b0372544ebbf370a447ca9e21bfd281834b87c658be4069daceb291bc5009988fdaf5ae195ea708b8534b66685d0564cb6cac21af46e7839b71047e4a63ea6f3e9a988f9541b25b29fc5b9da5dc69e44ab2f4f4a6bc5d628bb3a71f3d5c489c2a0d700cd4d5f6d279e439a52d8447468f05342e45e16963aa56c2f874dc8bf02e008846f663bb1fa8f6bcbf84097c102c0bd76ac78f2c641dd8274877204fe25a0129caa36f5c0b7991d1e781e66fe45f4b3a420c79803b97543a7c5e96eccf5f18997cb25b36521d859d9396a2bbde0eee351c6cc9388f501427a6fa0a4d3adafbdc829ea72e4659c9d83e50845df789461f2c52d44846a2e431931481b17f9e7e853314c843ff3069f633848bce2d18ca78cd74ab4460146de10c8fbdb5f3edd34647d8e3bbf2a7f4f034604ce14c1797329111c897dd51791043a32691b51b666774e655755fc891b32daa136dfebea17eb81
5cd780854567e0c19fadc070bb28174fbd128b88f7757a10f910da0418ce1e59ae4e3ef02903ab590a4ba7fbf524121feab2b3333d7b679a75cce3ee8caa83d19a5141f218a9987f83de966e7fa4318edf62bd23173501a21520943dd68d0a99f471f6438abb0296a4a8da141d4c2c95e7a607d76be5183f7c1d5863198d2223de0fb72725971d748182f66201ffcf8476e716fefb2043eb60ff322a3dd22c63bb1f49c31a87efdcd87be13cb8c9d1afca905173eb7f3c3660120c888046d74e96dee3f8ada6f1740e4bc74a0923e496aebc30eb6e5619c5947b51cdddc9930c78477f43de7029a1fd1abfb39992197e4f935127fd8346d52edef6617539689eb0e03fff6928d30d9d5ddc8f230f5cd64093e1c502ee4afcbec7c5f529388ea333c3af387321913f119399d8b373b0b4431edac324df6a312960975dc5b83f078e63ec6a4484600dd81cbfcd9814a3d8edbf03e1b8d183f5452123419eceee9d53b68a795d8929d6679ee23415d75b57f862989487240ddbda1811ca6860318aac7ee0ad439b6c65083c20396c9dfd885e8f0a4792f08f5e0b4285b62077ac48763d28f3806fa7b6f05f25d33dd97c63587cd7e2e139ff0e42d6c0d6f05dea9847cd49c6b6934426a42d07fe111711fd01fb00bc5186df5ba0cc2cb52e5beab02a36b2322ee9c5b9696728018ba5f37d7e5cad1be0615632e6dd9353148c5846c2d3fe806a16b58ef045d8de4ac96ab523efd4459fcc611cc97f2f6937afe7a78b86", @typed={0x4, 0x6e}, @generic="2f877767950e2655134cdd84cc3dc0f4f154281a9d9a44b24ec43c07a45ee3607585775e0143b707aae48396ffaf6d454948999323a30692df69b61eacafde3e81efd89a23e741a2fb0233300aa53752c198337d1726821307ef7b7f1c3d60b21e29ab35e231ec2bace3f06f97daa08123f02b70133da2b1045b52058caa346040298112bcd6849dfc61477271d54fc0619de4cfa3b61debde231cf3d23f4fa5b4accfd3c144fd91208b343af00cb92326536ffa60dd962f2db5fedf61984034ad5272d426b99c9266d3b808af9dc820c8a2d03e2eadc8194c1de53c22d61447eaffa74c15a0fcce5824211701"]}, @nested={0x70, 0x5a, 0x0, 0x1, [@typed={0x14, 0x79, 0x0, 0x0, @ipv6=@remote}, @typed={0x8, 0x33, 0x0, 0x0, @uid}, @generic="4080318b24064b55128ad517f5ecc6bae86c92889e42a174d227f88740e266283513ed4ccaff12da7b351e2995058ef1b130e0eaa22a78d459868f2c8d21ad96cc74396e", @typed={0xc, 0x51, 0x0, 0x0, @u64=0x100000001}]}, @typed={0x8, 0x15, 0x0, 0x0, @uid}, @nested={0xfc, 0x88, 0x0, 0x1, [@generic="4c8359ee1e4d9ba13f97e3f2fba9c0b40709a96569bdfae77e55938113052fcbdae5e87109398752f6d8b99b6fdce05873617fe9fb7dc3006ff50e6fbb05e4cbe73aaa389dae8a2bc29a57f7358ac0adf4130f91d887b5239ebdb5d4722acc5279f78c9365cc66ce84b3bffa93f66ca4d329c58340b0623f3fef100e01059c84d7d11b7f53c64a8f55ce312bb07e909925a1240b95ea1544b77138835711bc0691d6c414801fe51a077d9404914efea4e70f5ce18e7ccf5937f7c1a30b450c5156546548cd8bf8c1bfe48b6414067e47ec5f0849", @typed={0x8, 0x43, 0x0, 0x0, @pid}, @typed={0x10, 0x5f, 0x0, 0x0, @binary="91f2f6fbb55d6dbe436e55f8"}, @typed={0xc, 0x7d, 0x0, 0x0, @u64=0xfffffffffffffffb}]}, @nested={0x1014, 0x36, 0x0, 0x1, [@typed={0x8, 0x56, 0x0, 0x0, @u32=0x8ae}, @typed={0x5, 0xc, 0x0, 0x0, @str='\x00'}, 
@generic="af8c54b2fb5bef1d5df7d7093eb07a1fbdaac1505a52fc525bcb6a775004eecc7ef71d3457318078a020c6c6d7dd3fece43027ff3f1517c461ca5f81f47db3dfbec592519306ae7c8aafcf81b91223ebb2c25717f0e26619886b6edc358bdb8df97b575a93fe1189543790d029b2ef2c70c12da95846757a45d23c5b83f6897e1ba04a72e455344376d04e6e1be855e0a9b81b85431260ee0f3171e77d5e8b2ce039d5e61798942033cd82300763628294f0e269fc726cdfaade7f890c6838538da2595bf209d531938be8d974e3eac3a60bb0aa0e61471e80f89ed8d3d4aa9af6582dec208f254dbf6b960674fe2db6d390b6f37f99b0f9bcb6b66c5575490ed1491b1ca3513ab28c80fc55e912fa9181cc0732564798e6d075c5cedd8b34d40ddbf85cf29ae9df4557752b2c570d9832e8b4181cd68713f9b425f692b1c2275dee6aae5136eed160d1a740d967de33c9b942565202fbbdec45b5b34ca14cce245071e142b27dbd11bdf62f89299eea1ed2020d7d32bcc65dc6615d22a2a97bc0d58d9478ddfa3575d9c9a9ff56204e015b51acb12444455278d6ebf3f2b9715ffc7e651498e1911abfaf0181a200af387624eb2ace09cca48bdf3e37983a26961510c88304ef00e33b2c7e2df47894341c1f28b5290097d59ed0f42aca384f0b7397a86ccd46d38394ff1d1fbf34b2e4b76a7a2c2e391eb8fb8633ae0d278b109d4a88eb8c1b96f105e1b87c73eefe1a623c121759afa771d0ee514efb87255689b0f1a61b4101581b26ece2ab0d5492568f123f10a7ceef7e47ff659efdc096d005771d2e8bb5a52afca0bd1eb2da634a322636f07dcb95fa4706cd06dc8dc01e6b3eb2411f59b4c34ff464c7c00979e5a0cc333f05d9897c34463378f51de23b075a3ffeaa5ce6238c383a618f8dace7d4cda990a25bffd55abe26b336de1394ace4b35fa1cef521f98ffd16de6c34b7146e487ab269fc899ce58c5c2ff92b8fd1b8ea3b585d04345c08f126c5aae8c3be680ff97e35d32096af74b77293357e8e047dea6d7767240decdc80abe10727f9dabaccbc5504f47b7c7baaec14ecb99d8213c04e6deb3a2c8d971190bb7cf30646fa576cc574718fee21b4c67ca173be2f1bed8b423259aa623539bf2e300734be53ed3ac1faf6f3fac08d4503acc043afa2655e673a8685bbfd9354d1504cc48411ce28476b5b965115072c83237f67d57870a3094b92503118c7985d5ac850c2864a28f0ce7a922c7f989bcde4714293661c1f4a30fea7619d4010270696542331c612a4100c1a1feafc89ed7c169626049861022da615342604e7ff9756eafee6af1c8d14abd445304ae502bda6985621c538c326f2b8250e2b474ecc2d9d4feaa601b62466927eab9aa2867f76573746092ef30f9dc99606940674ee862a3f28c230bff03e1000df96f20ade868c0d739c5dd615b4f5fc1f7b5a248b11a5c7501c8df2f8ca796426d13726d08cbc8a4aea585000a1efbde29948fa99c09c74581c8461e0034dd52d6d89cba1c8afa08c99c0be55f91313baf43f2abb173a6fdedb831663c0ba33c9241e656e9bd4af112c651a4d4c1413c4040547e660917e2f271814da3ae39f2ca0edefe70ab9f6dec84c800bf56675d88cec2e23748482a30eeb0ec977fd0cff9b00a6f80c0e7152bc7bb7ce0cacd73e9dd6ed37ae3dcd178163f7fc74ade33b0292a71d03959c400609d151649f380df1545ee85e73cc96269ec13f5b38eb9d0449298ad388dc4d9503e8dab09fcce03a284e6d544301c7e8652dc9695bef4a3c721a089b6ade9fc99125e0eacd0d17bd0326c1f2f4d48825055a60b19982594e7585b45cc98f03a97a912754d36a8aec7c20ea8ff5cb83c9202b7097516475552d7d7827c41a5ec694ec85eeef2a55d84f91f29ba36a6c48eec5b2d1758ebea7092ce358994e2f2c496f508f4e514559e09b5975e2cf077df2d98bdd3a35c78a81a7baa58fad6cba694b6d849d19d8ebce1ef61bc90ce418894776fff026021e9099ee672427e7017625954e33fba0546c51f3bc911f407b04e713655ec2df6a20fa6c9e83b53dba8baf894ca5c5abd360ec6f6ec51ce768f6d74be4911ed1a8be38d02af1dae8b571cebfe7bcd62bae2c4fbfdef7baa2603914f4f4d089c86a27a465047b489928ea92d3de9e82f4313c0a71f29a518dbc57a6d0b430fe58fcc0c0cc5a491699c1e92c4c18d6a8ada09d42b351acb436045f9ef7b9d37f052d72fb9642c63bd5b1bd2fc83a2e57f97bc91c04a367eddc8e85842a95efcec147e32d177ea91606ca1f0479a8ce940a18cc943e8920ea6abd1f423acaf375237d80b12715ff2cba5fcef37b7b9a1dcf10851a685719e8c9f01b494ebb59ca8761b670e6a6bb56a5a5d7df408a12aa1b42484bb029d602036b18d9e64f82fc20beb9dbedc86ca24ca6955234277822c6e4b1075afd1db3dc56ce65e96decd26698c347f72965a3e433e901cdb25969a
bbdd6e2ad55ac94e8d4c43a46bdf4bb722c54245bce504a761e180820a1cc0e269f575f52a3d192c9fd20e70780c2d98486d85a0d078fdb57978ea80a70ed3defac5ba81e3f26df3c38822987850374acf6ca27153b2ef94939ed3a4f9786b4b92d7ce050d9438e233bdf8698242a78936c96d2b86e8206b76eb4656ef8a3068cc46ea5200c53313c0c247fb5122a27e3809d81f108182393e54e47960e347e619f9d6a411de183f6da8a3f8bcce00f72fc8efe412d7178ae5ddda1383de6afa3d6f2a90b8911289e946a89b0b619d051246f2ca34a77062e4cdb686834a1c75600591351cf9773203f7f202f6f5d40ad478c6cadeaaa896d45a13cea8787c4be35b6b963c13649bc6e7b6001bdd85c873c57c549d17c94965efa598879b89410df7e48e1eded91c57b052b6e7f63ab794799c3fed6580f4c55acaa33ad466dace47348c8d32376444d6fd98c9b8fbee5aee52bd30f190f95ae064210445c2f22d8468a050df6d05a834ee12e20204f1ee1d2e2e5cb22ddb70ecdb26537a61ea9d07707c552bf10c30e15851a9ef7fc034c843b006922ecf3253871978cbbd8c880c250d29dbc4f27f38446006a8d39abd49b33ac0759b634b3095379b923d98b3c1992a1a91a4e0633e20737270ca7f787040a27b5a018f2be98e4327f4b24e81eb9adb5c434e2c34e8e16325ca9c544d6b1812850fc51d2ee33fb9cc98c2c503262f4c4c6d2747a898feff843520e2675a59c601e17dbef934dbcea1368f49fd08708ee1fd37f00ae37e0a80b76d853c9bb6b104d8f3a375acd3952e6a12a902a4ff4c01c8935651bbb96182080ad056f2c6a722e7608aa2d4f3c8be6053a7cbc90928080a402e359e84dff0400cf5c77fda25855576cee728b6f83fa2f47ee75412485b65decdad7f5d53327102ba5822a840e542e16a751b71ff507dce6a85ef5a37a26495f25754fc0f69b0067cf92a8d88c01970689152dd4dd871e476728b4eb0df0967df11339a277a332b746210ed10124afd1bdba995100db2d996c652d24167ae6dc24173d4df75da62fa4869faab5cee7c7c81295252467db0a65e141f29f0ad4f22c5d2dbec77cd9ff29df30a321353e81d1582bc72ddff3f6e4191eb1267147b570591dfe7203e4ba180efc2b52662602cbdb17a01449d2ccd9acdc0c3ba8c1b079b77bfca8af264e721c97fcde6ba6f77b0757a51d9fe43ee2bc298f2ba8783376258d50fa64438c77d481da64fc143b61deb271c6b79910afdabfdf8f46e71de8f49df9eee7a5a88e88393ffadec6a941878229827a94bd1018cbf25832ca7264162b43bb84c6c1958e8ac8b39436de0d2cb7600453cb2340161dade582d1f289d7f75cf4fed63d29390a9ab62fcd4d8bbfc69ed91ec2dc87bd29d324c13473e0d240b3f0f7d731e5f1a02d8d3867da025c88d5dab2356fb067d67248721d0fbb4c58968fb2a187df0014eefaf8c009a635b1a778eb56643b4371d24c3b8445b5b46062ef3a87ea7de8234666c6893a73083d4f7dbe27cdd8d1bbb2ef8648e9515af24afdbd0e16a38a2e055d37f9949facb217c635dc7780d4105bc2b1df20810af650e86fad0bb4885989ae428467c7c4e178646358e25dfedc70fa377c14ea99be5a7c3c8fecda6a46d02d1f801e77b52df0e08f1236d3de7cf95b84132cc4fc1d9a490227992d3c823d20cf3a75a0b353b72ac0bf437d26210b4858971937f579b6650522279b3eb875033cefc06415467a572345c4b36a5d8f51141ba8258194fac6ea49675fb142372752d8dcc497f135bbccde8090db941b54d6978c4f511d7f71db190502583f6273e078a724e920361daef908af93ec22de9175b21ce0d8294aaaa29c86491473f6fc0d41004e05603d1dd3f34fab0c39c14da2ad51b15635ad24eecc53777c7180d74e10fbe67f17b2d8c873f8e47dcd10312a25f2cd6b7994a0340656b17831910132b178acd2242fdb659224d3397ffbf5f8f4c1e4c2547c491d71f05a2a2fd6203a47f00139d2a44e096999b7768278eed0f9d5f3b2d6a006de85526ada5b65017b8f331ca23a392e410a5656ccaee69802b19a2e044d1b6d4d3617ff0185085d93f9f8e967406efe15d1025a1d48bbea59d7b4020e3462fdcbd44decaa586d17bd5d2fa7aa17745aa7597f2aa49a6af2c53cf2efdbadb9828e730e1aa447b08ab5a4b807b222d23cfe5a1b1f0624e545d2017e1b451435f3ec2241b92ea351acde0171e37fb1470d872a5aaf1c38342b6fee134002f657154eb9e4efd99a712827a50ad98630e7e2939e332ae9c5e39ee70a5d547d0fc9535fa7740dd278c6d03ad15ba374e364ea046f49c783c56d1546e766d90fc15eb7c6e37a4719a045842f6cfdff343f13a317f542ac0c19b61f0a8ba1e34c3690b365acf3c7bdd4ba72836fa6870b780b7c7d5f5962c69d0c4422fc992a6d8aaae739f592afe07af06a00c9757ca669b0744faa35f36ffbbd84727e92b643e37dfeeaf03b49
2b4b673c09d18ce2f3efc474339eb2cae605dd8144825a4d42f41a9cf4d4960268c469a46765f045a6056d78c50c9a2f25eda048a4ec9a32bd1727f4649999d3950a61d4053716c4969c387ab89f8d9f497fb76d55048206dd459b9174e6db9ce7448b0c0734850475bdc23595de94790b2d7109b577f72758f80b01e896f7738d5ca55b450bf37d8b9abdc35b0e3b612fb00a3b8357b133cdbf8e07878bb85e3b7ac27b5646366c394ddf32aa158e8780daeee832811d5434e16401706a6dbf1e8cf1052f057221fafe0f59265e907195cd05e497753d11019f183f83d410d294589e3ff69ba96c4e9d660f795ec9cc2edb70e85fc1ae38d022b2cee640045f23451cf7cc6280cc89e732866783a7b2cca3393835dec53eb893f5a2f985098c254b075810779ffc877b235ba64a2b62b04e5d6c560293f593c273baafdc0b9bb42e4a34419cd501722c153dc667f4b71c50f4bbacba3a25f9f899034f058b37f8b79775ca84ac2d1e529f43ebf3107f3751365049f92361b22f3f51a9a3f939c1bbb826ae6e7ea9492b340a5b03b5fd9872a1785dc3273892c08eaae69dc86904815de93cf53a30e515cc1531e13ad5966fa531696a86386290622deb8f056822f203158d3d5aa4851bc4f8a0a4955684022ad80f8c59ef30c4526e009b6b63ea5c9e40b6f319b1f466891daecd9b9497026de4275c74b82ead542df3f4a5e26d73609f545dc0e278e4c6c9c328ed1079d96426836e45cc021a9a98325b4b648686"]}, @nested={0x58, 0x63, 0x0, 0x1, [@typed={0x8, 0x70, 0x0, 0x0, @uid}, @typed={0x8, 0x55, 0x0, 0x0, @uid}, @generic="603dc830ec7d42a2a043ed4bf0c050438d1999ae4e69bedc634029970dd312e09d5b50afd003e8b8", @typed={0x8, 0x1f, 0x0, 0x0, @uid}, @typed={0x14, 0x1e, 0x0, 0x0, @ipv6=@ipv4={'\x00', '\xff\xff', @remote}}]}]}, 0x3498}, {&(0x7f0000003c00)={0x4f0, 0x3a, 0x10, 0x70bd25, 0x25dfdbff, "", [@typed={0x14, 0x6c, 0x0, 0x0, @ipv6=@private1}, @typed={0x7d, 0x1d, 0x0, 0x0, @binary="48bdc976abbf150d7d9a1bd6b5788289490c41f0d1bc0c86025714445a231a1f397df36dd120b081afb3a1c7dcd4fa1a4c5a7856c18e28d12c7d39cacd5e2915bbc1e44bc423361f5a298be1952dec43962a6baafd6bd9ef7ee9862ab54ebc1bf4f64d3858cda19e33514ba2652948e9fa59817d1381a601f3"}, @generic="c3a6e43e48f963259391e947f6e0152c0553d93a9c70fa4f1376e83475df4842ca4f21b546a17a7c0e7540a2", @typed={0x8, 0x15, 0x0, 0x0, @uid}, @generic="e469e04a77c06bddf70f1c25d09499ce91a3dc77e977b9255d93b06ca064d92a41839be931943dd43151cb4863f1892d3311d2ad7bd8da3c56d85b7675c55a478ff311614da1c977260b2a979062b9d7e409ae1f1a3c3bd776aefd473eefcb1b38088996be4ab2e7482c47c1f130fe14d7f3a906e1ec7f3ce89654d4fa6c7eb4ff5ce43f965fbd1e409fc3ca7fa5f5f7e37f7bf1e80d20ab9eb065812aa0bb8691f01c9a14df46ab6791e6f91c7760a60b9cbb0349f2b9086e47992d0cf9e68d702b4e1709", @nested={0x350, 0x4c, 0x0, 0x1, [@typed={0x4, 0x3}, @typed={0x8, 0x8b, 0x0, 0x0, @fd}, @typed={0xc, 0x1e, 0x0, 0x0, @u64=0x8000000000000000}, @typed={0x16, 0xe, 0x0, 0x0, @binary="91e2466809ba9981ae2983788f925a0f7db5"}, @generic="53d7c017b6aa9bbca8fa3bb0ec1fc26e28a9be73babc2047b5d0fe9f6053d907c2dc0c402a39af8e1affd55d8ea112656af6b2d8c8365ea81375332330e2f3594693a7b5afc6a0cd60d5ed469a0e1fce3ddb32d7aa3318cfbaf0d69d81a5ec23046053767243c46c5d2e1a5c8ea3a4e24845bce1265758ccf85a3d32e16c9b5b191a580b283edf3f6bdc09d7a8a83333e8c7f581b932015f221f99d2b7338494d0bda53a01ed45073c70fe9f", @generic="452e09845665c68d8139e15624e5b0a6c15992816259d844ac7923a592c7cf224dbb1a80210d99aa62ae03041a6714ebd76005e05394bd71bf7ff5f34a10d0b9becccad51632f6790882600ac59a09b6e6c8fedb646ecf437c8fe22e165ac7f70c8aa0bcda453ac84c67edee53f0fee21ffaab88b5784f6ed83118204b0b1a1d2fcca745ed8ba1fa1a6c110ff1825e36ab33fb998e50b2ae18ce7109e390d047cc848817429ee15bdea19226b88ffeffa8fb70ed0485260bf30c11c83113f66b466a935464d7c0e58de4833a45f41e055db92d4f6a00409493a46f093b79205c66765123e2a2e6889227b40851989ad83248c0ba9e879a1e819c0bb5eda7b2", @typed={0x8, 0x59, 0x0, 0x0, @uid}, 
@generic="00474dd4ad2dd791bca50f506248ea829efc6eb88a58e1b48b8ce44683f3c6b73c24568c636b1d4a9e48740ed165d1302e6b9dfe4910886781582a554f8f437b1f15eaf0b9df5c54304b8dbbd256555878e8f33315d59e8924b8b7013eb83a5da4cbe96ca1d4b6a90ca5414833631a52b1df2898101affa7fbdd7f37e8579e05f43ab1b38bfdb27a19", @generic="e5d4b7742821aac12ccd2f7ec93fd67d9ba3459fceed399e04d9dd4213d7526c2752bae4a8c1a66ec1c40bb02985edfd2d2277d47e9e874eff8db9e1c9e706846f546b0617a15fe9a809d793391e6c79a2fa1e06d2a842b3e96814faa35fb768160d7a9764d1a06ad97172a9369393d5a3649c468ccd3d04ab3140da9e640ddd723af9f2af57a01acc086b097228416a697ca1f5aadcdcae236761b85d34bf182f4be847706e26b6ae8304f002d1cd08de8e0a0b321cb0a43bb90e27e306576f83591399e0e5a26c299a36803ae0f24f1564d7b8b2512ca0ac387a3051f3da04"]}]}, 0x4f0}, {&(0x7f0000004100)={0x3650, 0x31, 0x8, 0x70bd2d, 0x25dfdbfe, "", [@nested={0x18, 0x77, 0x0, 0x1, [@typed={0x8, 0x99, 0x0, 0x0, @pid}, @typed={0xc, 0x28, 0x0, 0x0, @u64=0x10001}]}, @generic="bc44a21cc9d568655f2098a2bb119cab63281db2b86beefa56c94f77b71a12ba23096539fe2b1160a5b40421557b8fcd8aa25f606487fb7f1d8bdd9477eb3aa3bd2cfeda4037b8001d4cb2cbe92e6b85cebc696b97a0", @typed={0x8, 0x8c, 0x0, 0x0, @u32=0x6}, @typed={0x8, 0x95, 0x0, 0x0, @ipv4=@dev={0xac, 0x14, 0x14, 0x12}}, @nested={0x18a, 0x94, 0x0, 0x1, [@typed={0x8, 0xd, 0x0, 0x0, @ipv4=@empty}, @generic="13e12eba5099012bf22649315497ffe675b52109b7f88ded06bd36395ef782107481c1cad9b738ef7619862e617d7a6972d9836e9af8a06c8fa2e221e6cd462e4cca23db6d9a924038c3a2de3559ff3c2904d72b79fcef9820fd9091749a429804eace74565bbc9bd3d5745aedd582258d4e859040999f81e0333c7b8b7368db5ca011d4239f6d8cb5577f0802866fe0161b018a7c44e698dbfaf98700dd43cae04edf5209fa32bda2a4dd0f87b21f6ba340c3f99e58829c6328aa2a2cfcb70b379ea49bb258a310356cd541d9bf02a6a10452d9953c95cf4ac5f00d27a1ffc1722c", @generic="d6e9d21313086023ffda876a586f756718c46370842b8a1434edf43560c2cf15e40a9267349fbda286a588926b81498758040a9e1b67b49ad138f5915191cf2d1e59464a3b7c4e69a9dfca80e3a8", @typed={0x14, 0x5f, 0x0, 0x0, @ipv6=@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, @typed={0x7, 0x3f, 0x0, 0x0, @str='#! 
'}, @generic="6d8f0507d0137a18b5f2f39930b7ee7b3077d8b2761fea14d8944ecdd39f034260198ffb348dacf4658d032519f00bfc9f6e"]}, @generic="ca7a7f3ea01c144754cb0a0fa6e5a1d7ba82b2e6378bc40f8027db8b402c706b30dc8f3b3d62b0e9046658fac263f0b45e4f77598e8ec99b714199793cd958a7cbc6f401641d6b58364656c3f44e735d8a87f945adfdfc6a54ca91af4f50f3542a290b52b360672630fc9ac966b65db67268c5abfa59a4f53504ed525368209c8205ca4bedee1282138db844509061249e0b544210097c8258b507c369ab8e31a00f955571bc8d23b600a405", @nested={0x1346, 0x10, 0x0, 0x1, [@typed={0x8, 0x53, 0x0, 0x0, @u32}, @typed={0x4, 0x6a}, @generic="bd290633c339c3490a89f380a56c0f85e86f669f7ddd204d378cd0dd4821c50c3ad0c8eaaba73323c70b7e8f8cc98f6a3a6649b3378c1bceb147c56cede73722c1659c4fdfa943b6efdbb123608f9b3bf923ca23602a00fda577d1d4414d7e5d5ac114959638640f4c25f97f1bdd4f2bb2db39fa1d0c9523573a5cdcc1bd7c2c5ae5c44faaa6f3668d0e6825c418fef5d10e78472a4aeddb2b074e1d94ae467839f6cd86085b3ac6f91be4179e9a67eed45c70eeb691cbc7af028257258db61dc5e55c5486fd366577cb57e8b10ecf5cedf19d92b3707e3ec7b09417e4e8686804d68ed8a2209fa26c6d89df19be0901d8698f970631eb32ceebceafce7dcd350f22fdf5850afaed68cca4c3b45f438773162d11d1dea178e4dcc2e912f7fedfb206c9ace9b8596fb7e06c9b5e2cca3c85978cf1dcf38ec230b793032a1c54b7148e6c1204cd866d73f028a67b1f55c727c935afc9f85e35a9890c6912dcc01313ac9aea0e7403954b734a71d783ae98ed55f5a2428a9ef113713eea8a187cbdd3f8822334c9d70fa295db79f1081148366a8caaf0826076187445f4e9d49825d15c4ea4b8967bc96a6744b1cd7db2add134b93077bdba893d53a972e0390ba9884d9949475e5d91bbb0679fc27dc4fd7488daabc8e387bd5ebd7954e51b6399ee9c41ee86089ba84a1964f47e0b186403cd3d4535599207671768e3840ef40dee2bbcf7b8a3b4f0fc19d9bcc151905bee53738e56aaa80542abdcad3f37b7f98f367fd99f037eb00d394e7e7735adfe83b895d7442b01c71d0f8fd234e99d1a07b82d39b284ac45a2eb51af0e911691aa76ac436c3493cc6cae121c68b80b12f02efcf8e98d4169bf9d0ab1d62161a8837a4b18c72fc23a211c5193939c8fe869b640a04975ad65fc9101cbb31983b7231dabb9af509789566801c860282bd3add3b96c5334385052ca97a684850f92499b80afceb89421403c3b86ff4cdb0d6ee5ca4f69dbcc06af36d2c54f2f65bd84c148b656dc81b4cc188d285c3eaa1ee339d88551bb4915cbc7e33e6de31134a766e36b3973e3c7af9937f8a2c584525bd15445a1701f8b234b2e067eebfdc96a5f197389836a63f37f71fe4a786f55414ba7f98b8c1020d199948ebc0e94a5eb2cc9334a1e424561a2513366df9db2d5129038f7e3ce6d35a7efa9913eeb76839650346b9eb67b31719b202cdcc725b3a76b324e6722fc991071c67cac407f291e3fced78b06264681861ca9d50113af4d2735d1219e791a8eb8afbf5dd9157ff6a81d94d737de1d90942be707d19745902faf8736bd0b7a7e0e7ce7b5f0539ce3828364b26ae2da5e34a9824499233ee4868e24d14ee0948bae130aade4c96364880cd907cddf78ed205cddcced38b607e47d802eeb234aa7917e306e86e08870e3c907b231618911bccd120509e290f60a9b02b43c4e2ef44b0575557255861502cb035bfa5cdfccbd0f408a95851603a861d1e61526145a1f337c0e76e3de1cf66fdc134a6fde4aed7aaa1fa782412c1b4f3ce6c78d5fc596cf3e7f44a1801257c5928aeb35849d8a73b7864cd9a1408e08d7208fb3950a2e5d71bd37b6782735c2dcf76d64a876beb1fe34a5cfb6a9f8dfd1553b13609500fd5157d385cb45246b61d2e9bc89aa61ed985c77e1cdad2f5ba1411ddaca6e40a3bdfd997b0ef2d49ce41b0329b98c9a7735f3c8e18198dc6c10a61b297fe4100acbe3ed1c9d8afd4ebde70c70fde51e41b099954e7fa305d282876dca7079b9d4766183115662c094cd18b7418b4b6ff2a75c4134b342dc37fd4003842c20b740532298537e10a5d95496d67d8a6a7a4a515162e03b51646ba7cf61a993b1910d3ea54719475fbafe3d539be13ea2827c71272d591db6a1482b25281637c1885a5ebb534fe8249754c21fad772697be3994c3d2a072d557f936b2160e80b3a4085fb54fb639bbb3178c42848232060978d6e01278011f20c14fec183efe3c047bbf92d991fc7fdd42fd149b9db6b1c867ddef09a8975b7e1c4f2f401e637953f32b83d2fb781ee19bf957bc4e4b668f6c8084224d237a24220306179d3a2b5317247572f200c
066b3f716cc62ab0501b9dfccf79d69c4bfce705c6126feb594dd9e8aaf154428c09aa9ba668a5a05a5a0dc48f9c9a1851148cb8b2331c09ac857a898afdd1736c540e96822dde260ca3a85ec45d738059fcaaba2766b08387d84f8361ac545c35e3e7374afda97b40e6fd92cdefe212cdc3a2d2877e589d020b3eaf807b5c90180a9fa5d87b127f38f9b60679ee445f3c09a09ff6ddc6b729c348f061859b574a15a54300ff5db3ec2e492f53b66bae11c85fea16899dc513986864cc4f094f78ffdd4995bf689ecb6af9a67141abd17cf1929c5a1fb634c8007e622908abbef2110a63b64c5bceeed0b79775d743b0626b8bc880a515879c30029b73a9a74593c0019716bbf14f9dd514bca45299bf89f3709332a2cf7ac035310b352c3e28e3021a15918ead15fa22d3a156b7cc2748361dbc69530f99aa9629e76c407bb7621f663ceb5ddbe5b12fe4b71793075f4a3d6e60591a2c2f26a961e7308e36135535db1b713735126792e1e8e849a694a95e0c6e03e2053508985d05c3d3460e1c0aa52e56a0330f9c2c80f2b2cbbf8a5387ee9fee1b3d2de34e2bdf9bb1cf5eacd1430e07cf846e5bced85f1e753aa1adf482ab6d9a2a76d4b0189da0f83c674e059d89983a73b5eceabadb3a5e1a7363fba23fcaf92b81247146aeeb6fd3f556c9238d63cc892218615cab9c2025f13e857c2bc4ae57860f74dc1c92d962fef1db3fb410b3d5762f199e03c303bba951a8bed31b069fc19749de4245b41a9783b9d4ca5d0e0debf13e0421a5ce91f60aa7f50e1787c642453cdb10c11e39aa4107db99815e196880729e7c0c3e781df9825fc7a6cef0912723774be637af1dba89bedb3673da8c4ac4a63151072a82103c3b912de1129e60418b8cda3c8c229e53f9b2c6b9826f9f166cec925dfb9722a8c3576162a9df9506a60be0844f2bee621e60cdcc3139d19bace03d0f32194b579b7a74b8531c46ab5249fb72d17085852bcda907057b5dc242d67d930afa3af0d824f7e0c775c11b8a3b5696fe328df3a17abaed68647aea87c08d3fbf1ffef7692a6b3a83a2e56161ce2127877006e3d88d1b125233b458a42fc30e7784b741b5564dae78e91e878e6a52dc4334993ef2549283955b2c80e05f83f185878916463eda0332d5ec6fa03d3a417d9417150bab5a7356b5130cc450f814ceac15dcc06c3e96e5a5a8931e1215b3841bee0b297683a80e8e664bd01b9e7fe729113b2ce18df5ee9ab17bdfeb5d9f200d842a51dd2fdb071cb133e2d244a663d468bc006315c6533d05c8fdc92d1e155db688d5aea53d586b0f79bdbdaef7e399594cc2777af9eb67e33372dd6e7322975f16630cb1a9cacaaf28943a8392078ade91e33fbfbe4c1e03bac816a9b3912704d02f2ed53ae05392520e4d07061151ba6c938f85e80b5ba296ca13825c8e6be141f560e9ef98ef61cdb080436bd92564998f3617458926e3cce65517ff65c59a54cb5d80b16455d3c962b8c86d5fa7204b6ecd5dce04c9cc732fdbbf67e6dc50bca3ba0b53255b47491b3c5d6042ac298330d049a1f95629ef19673a6b03b0875580e110041d04cf4696d115d15cd5c4acde347d136995d8448e25dfa9076ae2e5f579ff9be1efc8c5ad75a9ff853ab579387964f61d5c499c54b3f08e50a4dc345581b12f3ee3afa4350a07953c0ed016508c7d95ce7f8e76b593e54e05ae131827f8eb5576263c9974d3d498f532c39aa790fdc4482b0c3ab56e771fc4a5dec854a9e9421ddb2470976a74955ac3cfc82780918f1a856a485dfed15631ffb3d8cd614bc721ece74f86f722b475c9c38ccc6a194cfbef8ac407bbf314ec5eba9057384d2cb0e6f936a363e822f990b66279c33e483725715e04b94fc69fddaad1b619bac6227fa44443bd0b7e77e5c3b9d70ee48f060abca9074c53e4d0598ff2105be2abe5bdbece1eec65a1ffcaf24cabce8f88fd2b9c5da0e9ad6905f614b70160c8127e9616c8940227553090813924df7d3271de5f68f2d7c717839c4fb12482ecbbab30591ded5cc8bf2a7681aca0842896d4d776bdb65abd8bb11a19f98ce4247c2434cb6e824413dc7726e93009b5fed9e027a20c74c2630496796d2577d77bf4e0f96eef1cfa04eae42d61df3b6a703c7f23942239b47889f3015c9576ccbdf9ea19ff28060cf6b4ef8b4c7640485b373f15206a2ce56a554bd4d026897e13e407ab081efe8b5e0a9fbae09dd016e6993ac491c14d4d178949fe60e5915f0e2eabc9778f9b0bcf61185a3a3e514c01c621d5166e67c60989ea6927c92ac0fe8d6e77c87d294aa83d1a44e31f2827917bd2d97a952336ebcb778a1fcd9a3c7ff3eec0e4ff067956fd10363173845f3eccdc4b194694537a1e31814d24e32032929db9176aa5503809b3ad2f80817e34e611c51f627517b7dc7fa1cc028db09835aba41e1d8ecc2c1a6175c6e83dfa079e11fc79499451fd1b375a1cdeb9f941d0c9b8ed4f822ec1e83f4bf29
59edccd89513a85b3930d78659fc061b760fc89af202ec7d92c3159b488c327ad21e4628c796880b4cbeb4afc530293747f3b9dc16d6bae953e018e5f981f6f87c787325c0f03d5cd8d65ab12b06e834547c95904646f35392bb1e2624d09b6986632de18a40611d58e6c51f160cc23079fad1323a319249d659a8a8f53ade5e423440dfa6cf93bc0292c5f948bc4e54a4775545262cc001df90b59d56c3dfc9990c874a719602ee9a8563ff6dabb07f79d3f25514c709f5cf3afc1e1f4054b2433c7af38ad4a621ea70935185343af3fdc742a2a93c0933e5eeac9a79771ee77c10d49956137f1cef73c353200762cc858f61c763428b51d5bbbb72dabc88331be273bb3af1921e027493645848f0d17287b9868b9aa5873105bcc92dc116d124b89836e8696dc7e23d9bd0ade92cca481466096c134c712f8dfc7b2233c42ad2a6e058df90f782367910624d8f544e75f338c23557df2e2b792409f40124dceafb805d82808817eeda43bd57399af2d65242cb822932e17d27521417cad42735feae44248c3734182cfefd65433d47f524728159575182a5b7726ff5f7c62b5513e0e9f27048d5d6b108493c3dd138de89ee7207090efb9f567c7485104f969339ef5472652eef7230d39bc4b2bce65e522451da075b79b695f365f5d4652d7677dfffcf40e55b6653ab63131b74f78a441012856563708385e39457841f9b12787c2e8c14d6f5960c5c9f8bef8ed7651f0c9c97cc673c7306f52f9f2b40f9e855d51aa3dd91ccd8ac8383c44a813eac2d3cf3f0294a46b4a5ff42e70958afaae88e472934a7e6b74e5eb0f1c301afca66c4ecf46e65efa4229894570cb9ad3b42cccdd6ddb2185f03ff7d05fb1d9bebb396725f6ffe1bda03758c4a89b46c101d7a09620b347ea032d7184441021c1aeedbc41ade758a143012f77f5c3cb9bf6949337197b3295900699d76de2b1aa7eaa4272fe5c3db90f5d82d1ad7a6579c0c6a78b2692767e2657592814741c890646c2a8605af04481bd8c8d5a311ff1a9f31cb206dc45e0b7c8e3b2244280aa4cbbc052c9042c5ec1bf00127f2e40e04128f82a98854fef5e68550400703986f05ce90f6950aa62f922a7cdfdfa1d214e01297713eee52ac07577b63d91f3ec02bf87e56ac1076bb8547fcfb", @generic="4f80c693066f453aba77f81f494ad5de3f0bb0f189b5857ca173c1396c7af61d923d22ffde868c67643887f0dc47dd2b34f4c858687b65e31e1bcd2d3f1bcc415384e97edbcd36e05976d422767654fd817626e71daf7d640b9caef94c096bd4c3fb47ba551261ab0425935f87d3aab2acf82c7e8a50c6dea37e095e9f9aba1616274826a8b8fdb1b6d4b33066b4ca7daadfd8eda2e44aa58e347a8d5d556f76c73ea890b6bd39c83147183d0c13ea06238c218b2271f069167eeb8433e86f7d1d84b4ae7575651977f5642c3ef92bc398f284978cb883f455725a0a618f9e96d25f5a41f624857dd6032eef2f9bc600243931ed9ecdb7a4e79f6c4d4fc9", @generic="a02ac7bf3c0876ca66e95a176fdabb425f934cf86ae0c9c09d0c4fa369e6e856bb3a3c0660aa1b88fe124c28fb525b3c9edab9cb45c2ff6d5a68261c1d41c6810a0f6b35f9483d90a19e9177c05a796f9077daa82514eb8558b8bc2ccf26ffe610be501bc49535b36a4ca0bc4db8363b87e73b28ba0106ce9cb9ea6b4080e5f479c6025c3d6925e2a2fb0f5abc35a6c9ac9aa896a6d4d7b52c61feb91b21da9a25e93bf9d763ebd8654cbecf5f728965a303c36f2c", @generic="433e979948cdaa706dc09323d59b496a801c61625f031768c2ace4943850f60f4f227e04e551c7d0036b3600b31f6659ca04dc52a2582efb3f89e046a628ce98aecc0492409ae0a05604515a622bc32076db8e9178d4ab6171a556570202f592ba67104cfc7edecdd2e13cd44d68d6af45ee4c57a70155ed244b30d21006daaeb37bf12cc9cd663b4f354cf7b9976e06c5d5cc003b5b01f179896618467003c3d89e9a74a8beaafa35247e49ed60cf627e77020602ad650b96024705a04137a874a3b81ab26b4a735d86c6c2b522663e558d9632496cb5dabcd12e7ec1218840afcf3a2b27fd892ab32b7575cc862d4e8a4fd7740e18a351", @generic="ea763b6f8786", @typed={0x8, 0x76, 0x0, 0x0, @u32=0x5}, @typed={0x8, 0x22, 0x0, 0x0, @u32=0x3}, @generic="1a3fb0e9549d92ea07cac37f8933ce63fba6a8f00d1b4b59ad9ad3586232c1181f6da4d0409d7f6e5a15b0ecd8177c47be6dc1e226c34acc42410cfa0e7701f5f23015201bccbd1c79eaf6e7a60977c016d1526a6df45841bd480a4f4fdbf53dd4b447654255578ddddedca2aa8df2c9f29f6d3cd3"]}, @typed={0x14, 0x7, 0x0, 0x0, @ipv6=@mcast1}, @nested={0x202c, 0x4, 0x0, 0x1, [@typed={0xd, 0x69, 0x0, 0x0, @str='$]!:[\'+}\x00'}, 
@typed={0x1004, 0x26, 0x0, 0x0, @binary="10eec0ea280627c6ffb129df4047fb85a06c1aa28dd39b8ab80c8507467ba79a0dfeb30f1127ca80abefd9d4261be680716fbc6ce88613d7d7a6e82c760f06eb29eef3689f48874d0f96e1c382397732c33fc6d9597ffa7169a0332bc139a455a9fd208fe3e9e57f363113e79228c1a44a7ec27168406ce5cd679d53488620e427b72b2e5405fc5f9336c25d6e5c6d7b195eb4dcfab5541cb32cc80d75eb8c5a3f90193c886a2114be9a884de6a1f2332439133e27ace87e26268caae035ea27d36948a3bf973645727ef5e902a49d69b331824773994ca5164ff4d1bc7fb3872d31d4ebca576a500066655bbb25c36d42d8286c76a619af8b6a7cb0acec9481d34af71e9a0e0ba3f3bc5595737ce40f7501674950414d98cac28ac879b5efba2f9aa23e20d7c1fb591b8afd094d7d54001895b55716c80bb753637c8d41ce51cae5cd4332ab3c353e16fb75c3a85872a104abc4961e0aa8d54f46b1feae6075bceef2f24fce4c809eb82a82033547bdf8fbbe2c96ce5f20eebeb772805a64f6d9cf2b20c93e11453140f34119b4ca1a5ec954177838efb19e5baaa1196a7ae395580f9d3097483f237eef4896a9baef56e539094aa4b67bc1596ee1af18630b5392dde47ec52b56d83bd77982f7b231e6412a2bca83e10287b48aeff87aa5935be347a3633f56681e0046dc78c6f7739efe833e59bfafab0e4871b5aa971a0142f71f8eafcbfda78c69461f5b39df5e7372b733f2b3bd6922022f68bb0884b51f171ea992f3630bd2346fbd7e830453bb3928255b0901781cc68a5819753a6858215d68bd00c684d5ec0e244741aeddf9b433e4953e2ff1c5febc781f761b686061efd4cbbee40a5aff2f87f8f93301e2d79080c5eb59048d23bf70fdaad055bbab0acf53614e0b0fc792de8e66dad665963c13a7dd16e8eb1a6d69501fa0f4095132f460ff85fe47801e2ddb919d3e225c32681c2a08b1df57aab0813bfe63ed76751df32f23ecacd541010a71bcae8916034524ebabf9bd768f1fffa2dcd9e9116f7f30c7e275394911f94c65f5bff10fbf4159b85c0994e70ebbc704213704b73a0ca97ad36e46969558579dcb3f3a5635473b974924cd191327544561b2c7bc1577ca29f930e717eb320c597237e7a25e9142de98795ac7052c3d310b32849024f37cc55a01df8221457edce37c40818ed40ed7af52ee529dbb11dcc28127f1d2cef3a1e223ad1b9430f50386906ac8ab3f161856afcce4c7a93ce8b3639c8515b05786e6b21f1720c9bd7bffd16d506062e51c6c055f73441f205db8a5bc33da6fbfe31254ffe7903011a99141f8b516c9126aa75b4e8d311cdb3b06592c154e823768aba66909d9ade6c9f8f65eaba20beb534f12aa6448c15b6e67967db437cc843630dd3ab0b7a8d3c9bd3ba33e29e7b71f7881d1e3eb2b63c6488a1701adf0d9a1f78087832fa56482f4771d3ec185b15ff595c8b5774570baa81ed1173ab40d66ed225fa492951f067747f3f2e8e0faac2b80e47207e3acb2ecce1a2f46c2d19847b77aa0a37ef235a4a93008ce7db32c71684a5037ccdc99dd257360103d9f0c7e3cbb3571f9f4f03a36f3df24c582b10a0425dc834f3879b9a5a789713d32792329e33ba1452c413b6f9cc5aa71ba59ff0379a9703a3ef8e357f2cbb91e63aca042f26d988c60ce3ab259ef1770adefd5d852bc5fafa72d12f2a3e889e188f774015a7d142a150f3de931bc95146d906c38ca297ad758b71eb4a1697af66c3a5e5fd93c43d6273645cd2cbac8dfa99a1f1cf7a71a07c874bdc68dd63289e63d1ec001ef10c6bc01046be74a69c91ae153d6d73234b1e062e5e1ee925bb4b4d44ca26a09b681b62d683135d45d5f9fb860ab98eec49138fbe06059627218a7dd2264caf66b5349a271dededa71a07a2da2dc9e885c6b82146ca9616b1478f8879bb7fee0a88e69a7d8177ba9a84bd4e9304c56ce5cc63e55a302f30957efef7be1f21c110f8fa9abb1a148061ab31ccbf37ef0e967de773873c0ffc1b92736145ade859aeeee16217424d9809fc8f613fe8e949028a968234f62a32cbe0a118f1282f3e6ec54b32dbe7cba4e279ff9dfa641e622265112c972da6e466dec9a53428b50ebf2385357888c0502cc6012d93fbdd31e5c6334e0dc0100062987c3bcee1e0387d82cb159b5b67ba39f45f38e996c4e330beb02c35b318035cca031c688bde0b1df1a7ad959448c909e470703e7f706b2ca11f01ec4e6e56da1e195cf959a5c4f97985bf536839e824a2938205ef348b521cf1a922d89df8bca4a4172ae2a5fce2dc1ed778b89e722f0c5d8ee1c636c7625194911c6a92fb55db5b2c20b90f1e8647bcdb69d96f4dbea519c1afbc56424f4bc429579ccd08fc9ec4c64826d1f93675a131ce2a3c8ce3f6c54a87ca52ebc66cb87f347e884e9bd3fd57550bac306891d9aa35a4342c8f240bb1c94385f763624ce
6cd365062c8b5a191d088a7d876853924d380fa019b827459957bc35c025313036af7b58be212f0db31c322b6baabc316e9335f11776d776989540da63367f59c34c589e0584283c6890f4ea635d21fa3fac8ced154939309dff53e65b8838d2998efaae07870b816663c1344989f24835ebf0a0ae4c4b7cb4eb8d4276575f0c19bcc359d10b316757e060e2587bddd01ffd14d44854e52b9549855c94800e24d3587a1b43d7a49a2590876c23f8dc47b243fc7d8d14fc6e126da37fb5799c76ad17f6f966f4922ebe7fc5bcd0783e5eb37458a6412f4319ee4021f67bffea6e80c5430fdd4638f4ef5f7b86d74bd0735f0bc2912d34922806c1a68820cdc8733da54d4d99b7e1bb9d0eb1496e06dfd07f0e1b6c21e0a8250c9bf873e566df8b050d37d15941ef3ca078dc5623af3b53a55b88fcf88617430c41b1c2541413df81d13662af228866891a8827c6b7e9ac174007a7c6a57e07e3a9761611b470ea6d1a1bd35b02e48d73294f3ed994db905a2593fd7b689ae5c12437df90adbdd32437ebf7ca350f076ad2ef141aa308b9c1d03866f916ffbf00c42e93ee437da12c507e4aa0e986abb45229363caefbe87d18e50aba8bdfa8811c973a3634e9fa0b9828691e6faacc03accaf3174dd592ad31b57944902db28db2c77bdd9f92d08c3d53e3718c70b5c718a0f0243ced61fa09ad6500a21e261152ccf2acf195e13a093b78efe2e7543496b28eed9b9b7e10d49a2937eb53fcb66373d53e2b4875494bdfaa350d2c0ca87aba8cdb3f7afc6da9d60610c69597b946a799bdef362f9a8ee071ba375c8c93c605b84b71aeb639c513ff55efdea4fbc179ff5dea85567f7d46333320be9c0f6d27fe7e6547be227cba358b83487ee42f5e10640ac4123fd67d511eab764d01f34fa28a4dde2c665a28036ea1aca79cabac524b99fccb051a6f3eeb21536850f4ae91f10f3ebd4eb83e9494da1924ab073985bf7480947677c6ed92c60d450c4351e1c3287d1fe5fc39d7b569eb5eb890ef414b765aee4f718b878475f88592ed9520f6bb70b2cea1119c3856b806ff4d7434713360c0ba7425f13da43d43b798bfefeb883947243686c98deb50f9c2d71667088912499507cd011ec91a127c7c1f434e7d89fce9a2f220d979271b4cc971368bc5aefac1b1b8b34c48ba85f634cc7ee48d63963b5d200da64d60624e394f258514e4ec5c8712adb637aa7bb3ad0be8053dbb05649bea6714db46cb3807e10c655ef14ccb5a4d14767abfe9f6c18682bf9f5d6a6ec8d375d836ee828fb3b6a3b79d2d60fd9dbdd826f60835c225d272093546140c0dc135e369293612c70bc04661f9abd7698b999779688d4dfa4612199b05dbba097542552dabcf694322e18e81fba156d1f4030dc5715530a7a509e454301cbe87d7859d8435f849c1818363745c9c034cebb285be026c582884b0e36c1bddd99f7fe07c1c4dbf604fd3f624d48345e04d9fa53837b854a68dcb2db5dd1cbf7a4b8011924c7ca7c444f7fce3e04d16393d8563ad4a2fef34ecba95727927a9046f17b8566b6fcbd8c32779707a6f9d9f76d50fef8da194f800c73cc12092f5c0f7b14a980a3361de26834a134eb2055043461f7441e8ee45f394bd4be05a5ff5a2605dda0c11782f9d5a4372f6cff7f84e3a8dad34c28148946b021466546725119c12c7049b03f73c24305d8f8bc5925240f456a20538a439c68a258d1f1c27929f161ddff74cbdac0aaa1cdeae45b2385b93edcb7aadc2b9e5ac2f6dad62fc7ed562a4b96d0e01ddf9f1a3e11f36362ec5c3a7f0ec85edf7527293a231aa5441e1808c9fe2ad7492ae432564019255a08c12a68a615a02b130971ea1f369421925550f239570e595ce16a40fcc6b3006d205868ad10e955a64eeb854931cb74bd9bdb965502e3ca9212bc9d339e11e2960545d94af276497b21202a9c194d3263f62fb4087646b0f68cfc6d36928fd8df8d014b21cb0defe6ad1c79d4a9ba7426420999b5c35b8b6292082fada1c10ae55ada6959208adfc1c3fd1d58c0a54185be1da0d9610ba98baf016f952b0ee38be534972804eb003252a6d1894f0617cb955d09c6d3d4086452be816daeb7775a61690dcc8ee0e432fca4a7a481c0d85ad34cadb4e0960795c9d4a5ccdabffb957fc59acef2f34d4500326e9f52b43f863a92cd027fe4df8b899d48c4d0914e0db78f9f42d30a7bba5c7de2a3fbb9bcae2e23f5159cb9d2593bf181a434a3b361d321a8079c6178b9fb26ad42b8bf2088b708abc831acf16ddc86214f09668a3fd5f7cbbd1a957c05a0497e4b113809e09dd70ab530f76378dd7e785eea1e172cc789ab3942df1373d1dade393e4747e9f8a1c905ae1f4d5fa4eb215b484d82ca5116bc88a9122724c9c99aaa3df8c2767ab1816606bcbd4758cb940722c812a274a6e7ca18f1182d57b11a9ef12a8a083575d7dd45f75334e3978201de534a79c2e96b88ab330d443910b4376d08dd
7d38122a255f88b6887a1c01a71406a47135b688d5c979ecc7b71039ec3f3bc9ccc1e00f4b0e76c5805ad93b2af887e84edb94b3209cb381382de26f3a9759cb3f7f857f141eaddb266c2a58c6ff2f3f65ecfaeb9ff1b8483d1c22cc67d3fc91199c1ec878f7aa3117c42938a0b62b9b8cb3360764170122288706a94bf45d849248aff5e69463c05037f68987606b30db0af67d20556c1a7ceac79daee31bb07c7db9b2f2379c3c04de12d794ec3ea47832615ee9fef3b58c4b3a245316ccfb2ea5f76bcf6c73a439d156fd38bc108ec7cc53fc83dc5037ee777367ad54553c18c228af6e10d9f79519b4f7ad2eca0a070980a7a2db25de00516d6c3bbc52898c835c9f3ee42695af5c68be660867ee3857a16b5aa02f5a7a759764d24c658c528ee68ff9d172f58bc926b554fb22c7bef1691814361eb918373c628a3100437d9be56b97049527d3f6bf500df20ea42c988d420df4f2e0c7cb03ceda87d78a12a28aafcc36095e1201c1efa717cf8200df26edcb6c34ff315a32b89b64e5b1d61f80388758f68cabb5dfdb89dd426b3c11d7f2d8f1101e1cde2aaa85412d195efef01c09da37b5466e7c7bc242967d839e15c15f63b046273b8b6590541cc76b70ee298086e06192126c893dc9a0fcd12661b48654e88944735fa21e60b5f12caec7093d7b4fd9155cefd251cb7b81f3926d61f23d344dc97ff7ee25f466a402b8d28655b2288582eb72c8b4794f1ceaa33fd548410b5b6f68e702fc5ecaee68e9d90772076d76a34609ae87aa6f07f10"}, @typed={0x1004, 0x4, 0x0, 0x0, @binary="4c9365e1b99539cafc93e40dd7b8a0656f77e06ca01e950e4aae6bd06df7bdc3ec2a2a196ab8b5ad3dcaae2d13c58cfc93b60793612c105223bbe2ed2d18c43e0ee71fc3231c54a1a855da1134d249d7ad2a0c54ada2eba9e151a62d3a871dab190fc0186cc367a4726f1a9a470930649ea901dc22264d1793ad45fced3a009a2569eb90994d81a40743c1bcbefa98a9188524091b918f898cba87d9b4e4345712c5848ea2538c14b0ce7028991910c231bf76e9995bcb3ecea9eb56d7be7ffbd556fa99e79392bdc579cda64a8dee66417bc754977a4c0bd1fc5df71747db2d21eff41cbe5352cb6661fbf1ced92aabce65ce3c23f4bee26adb6f2ecd687ca29b8d4d172e28eba023490e460898c447ddc6c1173e999a4fa1bf4fa9357080d62f626523d6fa135c9a310ff24f507c37daa2d42c0e125f0e248aef7486d8ee7b6593541ec2bae9a3bd790b9419a808dad08f2b80aeb49fc768d0f9eb166cac072d669a56a62be1569ee230c6187d51a5803aee1046794018e060189813e934590f9a6d6271034769358d1ec1f40d48f309011faedf8d6224cf90f91017db5c91ada0fab472e64cec8744585cff67a651d64054959084206ebe582283f92e0900c4f25d1964edc2b3058bce15bdb0f76158eb4c359d49064b741e7e0f72c044fc49d984365bb09c73d365d8f806e45a4248695543a4529b88c587470750e0a3171c613d823ab44a8ddf8ff826e6eda1a378bab1205c0bc0ea1f06a854b048c451704222e3a097c8517c2c07e39b031bb4d484e675d88cf1a1cdc9f6fba035de1ac851c24a047946c0a80d04966406e886e66f2203b8c78fb8de17e103e7c97b546e3570e415e87f9b5b4cddd0a3b7d21b7e0ed23b743c392c0509b39dbed6413ecfaa5358c8fc267de38e8e2b5d9fe5c80f162ece8f82566b24187b4bdab94c56db28c542cb342f0b7aaaa22ee360b209bcbaf4a8565d8f2e3d2d7293e41c13bf619658c159f82893ded9f36a8c2a93a7a9c5ce7f40b20455604b87bc50be4b1dca437be6831fee0d8e35da3c090b6a5d816ee29fc7ea3a282d5b3e8ddc3a3b6c07cdf8f79630664b5736afe12161a3d5d6924bd7de05f21937cbe5d0e466e977fa725398b3efc88564ecacf2e29eaf05cd3897b95eeca39a4ed9231e2b77313ca3c9a0a5c82b575703c8bac5d4cbe1ad80968ee63fc35bb19c183bd1fee1a883143387512ca367b301ec5ee99bac11591b2f4daf7eb038205baaf61341248d017ee332a9368898d45d14fb02ed3a0e1be0a4d7616b280b4b08d30314034c1b4a5b6335f189792514e430ad94f4e3941ce507a874f76afc97db7395b89a8f67e07057109ad338b42d116ef48a1035c193fa4fa79e3a991008f3f6710753c083a24116284083881b3ae37e0d7cca98b2541feaf25b74e643be9e8846f218659c17fd7b22558614d6c16c86b73df4828c11ac5c62887bb79f25a479b9a90f2a6167c734acb0c4a0b9f8a405e1a380dcc8416246a8f1e2ba3a3da4e2fb1fb3279af8de526ff269fa5f77ff27d22a0285b979a6f90c45f864b31cdf4bd00ba577e2fe0fbe18c89b89503b40943e22bfa085213ec554e35cec8e87b9a9410e315ffd819b3d13ed474601b44d628d91175cd9618665580a35272a659d540f9c38cc55d66ca3efdbdbb2e9b4580c
6a671aecd202ba7e66edb81931042cc533955e7037035256ebbe5c760657e4b944396f6cabe6ef439285d70d65ba36c40bb2605174913e4da8709b6117f4978221cea7398892b870e5b7c3daf340535ecae73617ee8d7ce42dbd90a3f1be6840b62952cb6e58d1f22479b2154213efde199b5a0742eda8051dbb81a34b7dfe98e445d3a2d10a2d96b3ed62c9f619432bc13161cbf99384de2d2b10d0c8c8737f1f052a1d841b67c1f136dca3228dd23eb4ae653df2c1469ef0a3755eb00a538e2cf16c484cb9915a95f7ce8ea2870726642289a8187f93915e08d22572f373659c23ade065cbba32f771daeae1508e7180b90848997d2a260a21368d7f2d2e5819e4671a7ad66dfb892e9f376386cc94de6849c36fe63641d0939120cc32f18aafce416fc95333a0066a6e94976201dacd855efc6e29c921ecc90d6fe1c3cf32c6f9ed6f6c543616af1398a6ac1fc79a96241ac284e4f6ee616f30e8eed87000f3bfcbca33d495255925288c3bcd31fef951b9baacdeafe18df7b267425481e3692ad11c9b3c3d50a3585591ac3f736f9f38065645132155d637757d04da9850a88cc91420197e773aa111548f6750b971386ac73481978916784bb2658f148a7ca831f24f9f1da9276c5a158454840c373c1bceb013ed3d0a5a978786478de8c9e5ff51e97bc63c8341c30c6217d56f844f4a48d9431d2925606b3a61e077609c823ec1c13f76366b0285eaa95e79ff63bf5b4a18899486c325b8ef72a31a0ab69d09cee4adac8a242c7f20a785dc3299e12e3ef16591468e4db17049e5c48f6be4e2375ef48f222b21f037d57f18dfceffc64245d9f825f18ac5825a042c3102b110ecc43e625cdb198861ec1a9da495512f03ce862b80e4b59b2fe32086763868be64a09b2142676405182179188d95ca7b45add98ae5c03fb96c8643c1943e46718fe9fd83060c991168be57386338dea3b8a29dfeab35ca450312f7a807e2c303d91f0674bb03c98d8ec6546d2e25d6b073e0f7a43b65a80fa59282ec69520bdeedd8ddaaa12a6c248ea0ca724f1a2bb1f9e56e59f36794128bd5d596f575af6ccf6340050f6001048dc8341f2b0f01c0308308de66d9e16136f4fdeda5f1d008a803a9e32a7d92b35b777aca7f2d4046214e783f0a92f752f2131beb785646c19309ac9888b7683892c0d7564548742c7cb4881b994462ae706503d732c335194019b3d4815490c1290826990a9a4b7580d3fa8032f06d3a5494dea54d73d6c1f956012631eb524a3391e4fa54fc96652c8e6abc0e0b75d3e6dbf0abde878339e79ae4ef39f959e5658c4dde58b95908f673dfebe5763d8200402f582c2203a9d68c37c257d05d5781d044f6a1589b7eb0b7cfe801b3b531d0e24838d758f7f34baf1d474893ba3abce406f28ca159612400fa3380d99fea2968391b1adbe43fd11263e5949579e9a1f3268f072ddf76a151fbca1c87abb46a1ddcbe9bd440f40fd0a0b206d334f6e68a790924e10c9eb8bcf1e7ee8415193aa2046293652c91309c14b4c34c5504b509e98b70c7a18e9bd727d5bde923a39a733838f2c88636eebb2bbe879cca4c58cede033bf11f7f31caad3998a70d8034cd233150a8a41a5a2e7f84a55f6f8de1da70103e2ea115b35ee848b47f9cc3931855736638decec3211d8ce82ffcaca966f3d07ade4ec67fc19209e1c4bedb406ab87fe009ca46cd35d63cc7fefe9e6f1bded3ed7d5206775db8b8a60d6c8c3aa157ba2e0a17001e4bd9124f65b8cd5249fad6cea1d33da124390307874c27c0ffa69b30d213e84b2284013f1aea08a980a19c53f97e9ce16e100e1b5fda7cfb999b2e71cb31b896a7c5b7cee6143a7bbe8a0be47284b3d40774dc17afe3da896977e25b30c0fa77966692115ba8a3ba0415547dbe1625769ef6071c575ee1da8ba0de001f3470f2dcdd3636bfeeb16bce926347f488b7b9f79a919868fe004155bb84f40cfac783daaed7b62a5a68f8ae97a35f90eec668d36e8a179cb262a1fcc6e86785f0351f5ff671ff1db82e846b6f9abbdb655a824c3cb7d04ade6c12ac3d2593b0f78fdf3296145e7286e8e46603c31a2b7f4b19c7b5092c353953324dc02b47c33f4c5d7984e9a3f957a01e1f9cabb6c470438e8807e71dc4399fa943f5d95e51dabbd7851473fc616db2b40eddfe5489db095c2b0d69777d9ceb49d9d9cdbe57a25529193168df79b65c4d3f8f3c860d49e170658fc66b8317e51cc4f76163db65cf63f24ebc6fd2ea9fbe8f5b0384b591e4fb72c9e76bb146d10cf31f302f61fee838d9848b9d9640ffd4001aa70bdf96d5c1eea7fabde27358ce478d2d3887e998875aa9a065cc5fe48aba5fe06501abb28374c19c5ccb44940afd112a088c211e637d8731d66ca0676c8a8836b008148b0d4635b35a23c052a2bda3626342115316ad0ac3539a96373bf1b07917b22f28598c6b2ebb6bf3bbb50898536bbf46d97a984c5a689cc21a2ea35eff5ae8d
9f3aa45c139693fbdebb9739203e394923365ab15f0171a71e2059d1470b7ac175a7c698ef538bb3cad1c6c8ebd081fcfa83e3267d568761c0c0627adc64b115e19b2e1d1d8754c05f0fda4fabd479d0e672c4977df2b24516a2f179ac503371806f49df55644bcf9aa9e652a8cd6979ddd067f73090156baef52ad8f7329adfde05748e89fea77133c98702e8c733d982c7d86a575e0d21cef175853719fbc3bbd9ffb36c15048a6e84d029dbb1026fb2349e907d3f8056fff991f518ee91fb198bd5a85a489344e7bf20c698856e4790768ea7c6c7348d42c213421eb57ebf9ca4ab03fa32c529bd78cf775a4851179b3030764225915d3eb6a4cabf7e35a85aeace82263f71aff6417f48bc5844a1ef853a1b6c156216e4257280e4ede49ef1580a262e63af587848f43c3a720ae3bf68c127ded68bea321142030a9e9876be970f43b940aa1c94f59c653f48a8b5dbc9337cb74d1cdec240f9a4fb082b7e19cf0343195a364c3fbae25f0f816ddf2f08b0911f2b7b16faeb97c637f15b2a1f90814e9ea869e4103c92b7b4ecbcb0326f989b1c818a1d4607c28adb599bc1e08920c4c11f9f8b739b30ebab64d048c9fb0eeb8a6933ae76577a9062170a20af69512d173853ed77bf2b95081f0e808ea7241e5223aca34237b0cb47568ecba3af50fe53a4ee6a1a274ae62597240838125f451a56f8b372692ecd7dba531c4fd28c5a4c1ed9eaadee87b77b4830579e82c341af9d098826bef9f2b3cce99d62b3e885511e4177411fffa8b7191f09c06c38b66b681d12dc2753fbdda5a50d79483f84139585b795829ac749b682f61e582bc4fa431cdbcf7f2b7c6aedd637e0802547035b48fb9a597a29d334515eb4748062449b01546c998c295a8a52e376f591afe49adf45e8b1a473a6b9fe55be02c70ea5161a2ab472b76f2c6602ae5b508ca6126141e92d10476e5f453a8244b520aeb78354a62e7504ed3bdee660b813ca644fd638de486caa135ddb90d7f1176081bd2ca3f2e2929822d792eedc860335be1d09a4e07cd93aa9827b2001938c19b84c50d6bd6b8fbc71f0216aefe227d5bb5a1ed167485282fa7e801a1540de72f989cc65d9d44fe3a0a61d3359e3642b1d9a42d1e1375785828812147a03e16bbf5c85ab943954520e5e25d8abbe7b6032e92091ca8b9b9ebe343ecd403da9ce81b9b4f2c47e3a5d4a458f6edde0ecea41f46bf3032d3ec7e7d4af8fb5cd1a9b916c65723d110cea6f17c7c352a6be9abec5f3f3799d50d5b69b556e024c482d7076708501672ac4bf024f30acfcbd46804f5f32af2040e7f6a7d52bde679edd33f33f30042a16c542768ac3545158084a08fddf846100c4047ea9be156968a4e98fcafd23b9e032e204b905b0788265ff3a2710b42f755a4d1f8453214c2e9e423f9716eae83f5406032e4d008ab440eac7ebec5a489c6b67d6d07a24dae11360248dcd47beb2e2f51178a0c98e920b8a31edb9dd890fd4ebbfa49dc1f117828d3146f62428e812dcd906edc82964efbf644aefb5bb743a7795822e10be0db08a89e1e41423ba591c68974b0d12b9f40a9"}, @typed={0x8, 0x13, 0x0, 0x0, @fd=r4}, @typed={0x8, 0x68, 0x0, 0x0, @pid}]}]}, 0x3650}], 0x5, &(0x7f0000007840)=[@rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, r4, 0xffffffffffffffff]}}, @rights={{0x28, 0x1, 0x1, [r8, r9, 0xffffffffffffffff, r0, r5, r3]}}], 0x48, 0x8080}, 0x2000880d) [ 1983.718687][T28491] bond975: entered promiscuous mode [ 1983.726315][T28491] 8021q: adding VLAN 0 to HW filter on device bond975 [ 1983.845539][T28492] bond975: (slave bridge933): making interface the new active one [ 1983.859889][T28492] bridge933: entered promiscuous mode [ 1983.880489][T28492] bond975: (slave bridge933): Enslaving as an active interface with an up link 01:55:33 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) 
sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xe203, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1983.897069][T28502] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1984.020769][T28502] bond923: entered promiscuous mode [ 1984.027300][T28502] 8021q: adding VLAN 0 to HW filter on device bond923 01:55:33 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x132, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1984.075127][T28503] bond923: (slave bridge890): making interface the new active one [ 1984.083533][T28503] bridge890: entered promiscuous mode [ 1984.095052][T28503] bond923: (slave bridge890): Enslaving as an active interface with an up link [ 1984.130035][T28508] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 1984.206990][T28508] bond1028: entered promiscuous mode [ 1984.222518][T28508] 8021q: adding VLAN 0 to HW filter on device bond1028 [ 1984.239660][T28511] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
[ 1984.261805][T28511] bond365 (uninitialized): Released all slaves [ 1984.366244][T28512] bond1028: (slave bridge992): making interface the new active one [ 1984.374735][T28512] bridge992: entered promiscuous mode [ 1984.391564][T28512] bond1028: (slave bridge992): Enslaving as an active interface with an up link 01:55:34 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1f00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:34 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, 0x0, &(0x7f0000000080)) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8}]}, 0x4c}}, 0x0) [ 1984.458592][T28530] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 1984.519307][T28530] bond976: entered promiscuous mode [ 1984.525136][T28530] 8021q: adding VLAN 0 to HW filter on device bond976 01:55:34 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_READ_VERITY_METADATA(r0, 0xc0286687, &(0x7f0000000000)={0x2, 0x8000, 0xdd, &(0x7f0000000100)=""/221}) (async) r1 = accept4$nfc_llcp(r0, &(0x7f0000000200), &(0x7f0000000080)=0x60, 0x0) r2 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r2, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async, rerun: 32) listen(r3, 0x0) (async, rerun: 32) r4 = accept4(r3, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r3, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) bind$inet6(r3, &(0x7f0000000340)={0xa, 0x4e20, 0x800002, @loopback, 0x1}, 0x1c) (async) listen(r2, 0x0) (async, rerun: 64) r5 = accept4(r2, 0x0, 0x0, 0x0) (rerun: 64) ioctl$sock_inet_SIOCSIFFLAGS(r2, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) ppoll(&(0x7f0000000280)=[{0xffffffffffffffff, 0x4}, {r0}, {r1, 0x5064}, {r0, 0x10}, {r2}, {r0, 0xa000}], 0x6, &(0x7f00000002c0)={0x77359400}, &(0x7f0000000300)={[0x4]}, 0x8) (async, rerun: 32) write$binfmt_script(r0, &(0x7f0000000580)=ANY=[@ANYBLOB="2398769586de1813916dd5808c2520a967acf8ab1d04cf15fdac9483a5f76a4b263c2f4ce6280a3c2e300a00092a7c2ccb42c175adf29c0000000099951186e7e151e101915a0871d2d8482174ac19c11c1689efdac0dcb04db268af227fba965df4342c888f92389873e846401662af37f6edbc380b84f404d61fca1d0f2f4325867f7a4d998b52040887ffbab2b1fc4df5d3593d791a8d6b0521d97102c33a4484927d3e7af3c7851531f8f92a1d5305f96240ec65577dd72d01b2f988555366b2ddb42a57e48b078e34c03094c463a603f7a28c1f58b9eb576088969b10e3113607b1dad3"], 0xb) (async, rerun: 32) r6 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(0xffffffffffffffff, &(0x7f00000003c0)={0xa, 0x4e22, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) listen(r6, 0x0) (async) r7 = accept4(r6, 0x0, 0x0, 0x0) connect$unix(r7, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async, rerun: 64) sendto$inet6(r7, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async, rerun: 64) connect$inet6(r7, &(0x7f0000000380)={0xa, 0x4e20, 0x4, @local, 0xff}, 0x1c) (async, rerun: 64) listen(0xffffffffffffffff, 0x0) (async, rerun: 64) accept4(0xffffffffffffffff, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(0xffffffffffffffff, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) r8 = socket$isdn_base(0x22, 0x3, 0x0) (async, rerun: 64) r9 = socket$nl_route(0x10, 0x3, 0x0) (rerun: 64) sendmsg$nl_route_sched(r9, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) sendmsg$netlink(r7, &(0x7f00000078c0)={&(0x7f0000000400)=@kern={0x10, 
0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000007780)=[{&(0x7f0000000500)=ANY=[@ANYBLOB="100000003200ff04000000000000df2544bb125fc6c1805c5a085a3ddf0d15928b4530b24134c6b654d676b9b40ee120e47162ccfe0aa1542b60b68cf3b504b42516e19e0882aed94e605c52cd5f8bbceefa5ac3c20bc32afb238930d6b1aa0a80d386bd358faa2408d81043c28322c9556c3734f360f547092f260e97a119fe"], 0x10}, {&(0x7f0000000440)=ANY=[@ANYBLOB="50a005e90c071c51d7b6200000001500000425bd7000fcdbe7d2eca01bd592f93e00", @ANYRES32=0x0, @ANYBLOB="520a48b39d9173d5970c8f1a70e859cbabba9a79f80f467292bdb3b797fc7b0dc2c03831306581f910ae4a5115ddf7b676b4000020fe4cc2b6a3868ec49c50d8d4a4716ccd95746fe69938e83a8bb968b884364e69ba8d608930b0aec389f9ae23efe673c0425151a3542329847e98a065c62b47f5362c1923f68a56d8f5a625544964e0dbd7747a7392a3d28cdd688f"], 0x50}, {&(0x7f0000000680)={0x3498, 0x10, 0x400, 0x70bd29, 0x25dfdbff, "", [@typed={0x8, 0x78, 0x0, 0x0, @str='*}\\\x00'}, @nested={0x22a0, 0x24, 0x0, 0x1, [@generic="c392d28a68ff95d73755d3eee054becca8d740d8eb26c2666ba30490e1704b5c5e4cf81f74fa893109ded1234be3cc913ac896cf97b6f8615afce64200391aab5b63e19c7d19d125dc0b390e2231e08f13fc9e442c6c6b225f9c3612e8625d413ed74976af71be67b9f6bb16ca5eca0fc2507a1bf10a9dec77a7c12d3b6ae0e3a04c803ffa42dc412a3611400636fcd776d6cb35201c", @generic="238c44509c16c3c0d7346bc923f7e05e03a5a8185bed6227823e798f3fe2710cc8c6158b2264128376ef19154fafb147ea3ecbd46e550afbc5e6f1cf15d244438ad7b86a51261d7bc7171e827cd722506ef3412cefb1c9f4ecf5d602640843e8f9bddf1894ba946e900b9f4176cd5442cb8e01d00aa44131c82daae1414c8582b16bc8dffe579452a02b57dc54ca6874a7e7ebf1c0a822c849e4257e48462f4e4c29131383a63a4b34ed4c9b3fff45818532c0bafb0ee1807f8ca7b7e06bef3a37aa77394a440054b4845b90421559867f4caa9806ace11771254509c322bfb3cf509611c99cfd01296bc580ca6e7954723e89c23ee1e5da1aca8bb401613e4a10c00d4b0d4238027fbb4b8905c529e03940a266d0af3f55f44c75d22ec33e56815379b0b34c68f933215ecff5f1fbeaaca6f136ed0fcd4fa635148d2d9db8b600f3a2b464d6d4b62d3a6ebfe668dd8cf628692c40e932ad9538ff774d2892bdc94403463f0cf73b86aaa98d99608451aab9f51049fa3c8d7f3e55f05753c50bb87f5d957b1d13687a1c1144846c9962bbc14c9c693ed74949c935f703a144391e797ad0575ba7391a85117ca1f249dc97a7c418b501928430b53d1bfb2d398526b37476f991d1735309f4a74cb7a2440e268acbf716edd7948c708153a980fc8a102e1be3630a2e10919a4092f42c9d85f29b7add5bd7c697f8027c782cc8753bb43fc3ddea3d48bbb747a485273d49841ee33f2c9d5535a1c4e7b7f42824c5174c2804c411ee179e6afbe691957c3412981dd87ac20eb8744e13696de169a3abd9e25b16d1af19cfe62f57696fc7efc840fad8b13c4caa82635b8b8fa32e00193b78054304aa0ae20001236f2e879662725c5e33a2347a7aff39e9700b6d4c18c04417755b7db27c9853a5a0089baf4a8454ccea8618d94e6ceb2716c8b0243a742b9ec386ba9e60684bb2cf26f2b32d60a56a812150932ff0877584e5da429976bf59d24a70228fcff7b3e9506f051327847e1b3bc399a0fdea35f3d23a54b0d70689228c698d02ec68c44cbb1d28e37602eec3062c5312350fe386c14278c698061f2017a60bb4e394417bbead4ea8fb6f3ad1be6abcd5636f280f756275f3fb582e842cf109bafc552acd4eaea412c2e42fb0a6295f3de373be0850484c85a9a9e77478c338233aceddd8e736dbe1359c203dfcabcae35a9a31b8a7bad8b0d68c071ae93ecc65cdec6150b9846741bba249b583599e5757d32fe6ae8508e9dc2210ab9b9b679cd9cf2eda6bf619f309f6322742aa23e79126814258bf9ba2ec38b8a2be3c4614bd2bd28ff454a74ca3a06c87f6344fd65fe099da61e3bbf830a7e576795cd63dd97e21d6083685fb8f8608530fd8ef61e5c04e06ae5c288471bd009c2ea33db5b2416fa28c35f515fc746fc8f9d5b35738e36c6690f53ca15e46594a6b3f47d924b1b0d5cf9ed91f8bb0bc21b39bd7c1f1fe8f6fb543d3021c7268cba231bec6c90abbdfb2ba709c7dc64f44710486962a7ea8d34d4a39b440ddc893bbd439f1d72405b334edce3f212387424c3f5b3126b64709a05c718f21f87a0a7a4567812bff2058f6f13ef69316b8a5701edf914d85e189757e9cb4
69bf665ba39af808523d64c463e397a106b49dd2ba5410b6af423d10440ea2f3d6650d03d65b7a0a17e88ff10a4787db9cc5e9f4ef69bea96f48bb76bdf46de2c5a8e47031001650d8834fc861f7462e548fda87aaf3d7d577c2ee2c43554e4540a7fc63d0c25488b58b6056a25da6acd83da886b508f1a9a1da583753b16f6a419a37f68298351ba3eab2925843229d47c8c87a9afe6f3860993595b012374fdee4830691d62ba6b8a4e7dfdfba57fb879b8da38e3cd2bbb1fffe8e19066a48a206e8dcaefe0672ffefca64f1762eafb62fa1cfc00fe66e00baaac7f5adb4664541449ee957f33ec5683105b7ef056dfe5bc93875371acbf0530f95ba6ce5394fc19ae5967ad8dce677522f3719915e9899700c8403d7dc7b1146b40daf69414bee291596f2774a848d1a24461ff0d78ff7a309afa13fd5a448a137abe2f85e67a4e0d333bcce8b95d09874fb253529850e5d67cad0576d230769fe4d6ae02251653b7622118fb0a5a59c5a225d49583c73ce3e0ef4a9e6ae37ceb9163d0093e590e067c2fc1caed574694c8bfafd130bad222e2ad16f95fb3cc51376143b53442c3e41d7713c48b6816f3ecb350ec6d6e63892de8037106706ab953c073ec8b88738bc47ccf185fc20a18ac40f1b66bc12cee2b161553f556b555a5a2d188df60f7a0c0f07e80a20b0907a65cfcd3f170162f73ac018e43d55d7e7488bf122df67266c6a33fde3d5ac33c14f2b0862ff9f060bea1c11035a8f4ba07a70cdd23c612d2ce0bdf4af0081c57ba2dbee8a64d2307554f1249d00cc97bb81b8af11d009021d4e16226ad4c4a19828ead98b74204951f2134d1b1077590df397d99fe9eb690002c2d8aaab88cccde1e50be7d6ee37f5ca32c73ff2e3b5e069c5501fbe4977a49a88fec1cf3d28d8019d189d59cf85d0d081a58bf791eb2741f9ddc2bbef1e542f402247184b04270bbc4290e31fb3e935ae46b30aa5aeb3931a7b4e6dd24a09543308ed9bc75d4df3e2b41705600deec43976935f5557f53c34795c01f408507ae67cf82908feae9e7cca6055be8a150a7e4049a45646418d9c1fac249b07f3cd6567d0892535b6a19f8678bd6d0bb5a83defaee3d43b4cc8b974ab69e87bb409f18f10f224f33c16a8208301dd30ee3bc72cf46aaa49ba28564234cca8701acc4730ea0532d14eab8626ae6d9a42775569e213bee3595b040d24284d0e06e0cbcbef349363a6912685f6723959f8ed03da9e54bcf13cd97d4a2656d50003cde7427d4ccc0d07fc005a9ce01661532645d95da30a7175d4bbf9fc804239a3b88c9e0c3b5c8f06e64c762362d11957e82486ae464f82495192ecde94f51e95d6fbc45eec59a7ffd110dc73b7c13b1159fdfe19b7246981738af4769aa8e5c7d0eae0e84c08c9a1e3ef3947850a24223322e97b7835ef1c74c3dad00f8c94ff618647c6f6ca149412769b63f6935c2de6740575ced735bb71bee0673f00f62952819ac3ecce855f005ce32030bd38ca2c155f30e57c487e4d6a2f3516927ce93ff624d24ca5fa25ae8af59659352e9faadae52c6d3fdd1a1ad99da806169406adab524cb8c0f1a5a9870d7024c75419f0fcdcce49c3de032002f35732d46785d9abf8899ed6816cdfb56cd58a29067558bf0cdf7d1863c889b5b06c6de9187fa13d68443268ab099ebf948570a18b8a95cbdaba1db2b59dfa7e26160c4e3a1a59aafd3159ce1b4f2b379dd4b5822d136009c30451cf0f87689661fc310138e0abe1fcc0e9f44d428211a391d0692db59751906cdfddf3f0e3ddcdfc037428785274453193c43a172730066ab42e407d350fcdfbd19b79c7282cf2ead0c8dd6582197df211963582af5473f86d9f0de4ee8bf82e2241267a9b34d6402225c689451292585aed7bf71dca358f8bd51d91f4be366d77a3eb3bb24ba9a2baf43d226d0f42b6b5937594bbb9601666732149afea9b01308cfb3e6d62fbb9da0c420923e18bedc06eb61b1ac9227759100455c1a1d7ecf4491f3c9536f8a0a132992042a0eff7e5b0eab27310db5ed9072c90d40a1f7c0316c2070eace3bdd37ea76b5660ba0e4e805947c4807598506646281e344952ee4b0bf5cd755df4aab586fbfba2860fd05ed70c88c233411392545f25853fa0ca5a6a22fd7c0d24d3090de542365dcbc86ee4da3d5a0030cd1dcc109cd8228b80150d8ca9130f9a60d2c41779f96eb955789b4bd8059faaac0cda3763813334177b18a90da3cb0da05267ddcfcf734436ece3096ad47c5b4a4648f9ac2f8d0b8e4909efbaf79b441c1fd57e173c842c388b37fdb24dae9f35172223977285118eea4ee6539c219f165d9c73cb49ae87d0b5ebfc9a51832c7713d76f1f8312716909ed04864aff390ad5fa444be5732492ee6ecdc68bcc51d6d0d244fb27cc5d37c3ca048c11b9e1746d0dd93c2657d73cf4bb6b8e1c1c078527989868322f04d12d84794ab7d7a86cb7eb1fd316bee7286fb8eaab394ae71db02
bdeba08820428342760c1382ea02b2b11f69d8eca7208756408e192017ec55f94a2b11cf17b7d7e0cdf5963700c7e073726bb9a0a7c5a9c6538c346e8c51c8ee624203cfebf89640004e13a01d0abe5ec851960411d2964be9bbab1349b77cd08ff2f5f3b522229ddc6e738d080d072be9a6df16021cfc8603120c0614f735c9d0a8ee43756c8b153602aa5387f68127ca53d146597a7c36dfd3bc6948e3f24418affc377d0c9f62f86307091a5523d9c1c81b1db6c5ee4000acfbbd98b0e2766ee028152ebc77e2a2119702d874613f240543dde82f0ac5786beec271f5495b7bdc4d7278863383328ce8ad6f713c831bb7af41ff07288489e436c2f31b8710cadc0ab03b23047ac4d20a3daec1ab7d16d7faaff68b413e69b719c9708a758bce501bc0ace88ca7c72b4c450d5fc6dcbeb036b2a3ffcd6c6d05fcefcc073c52f555a04ecf8c7659133ae203b927aa389e1c7a74cce1e617088c7157b14630044120b58000b779386bb17c8d608a584b24a368d2a04c107041a20e7689d9d9c835b73907de136dc540945305ac22137731604fd9e8cce64fc2c65ca4fe48b424ae02670497c12bf6c7ba006d434e1a70110597208aebdac009a5aadcbdceb9a37494b0fbc69b568d4ba016a00f5aae3a57e2b87ae8a77ff7bdd6fc810629e69368ab3da4e6c6d3e92aa23df8b3d7cf1098206725ff83d4f34cd7a51b64d0aeb37503e8c6589f95f8ccee843a63907b2b6589234cf95a19f1573d9edf4b0bcedfbcc954c07362b7f1773ac2a7d0c75b531f90c599446c343360b8fd1ccd5fd9506331d3270fb6c81501088377769785ee5036b18eb14687f503c6df9075942a58244e94aab740245c132c1f6b5a74075f6e1a0d6cb0881dc9c684e19469b67392763662708a883cde65d7e91f4e7a5d5c23ef57e0cd77d9c1517502df57750075c945202bc27f1ce68de66679bfbc978c86340678d73e1d05a272dd5a14b02e9a6316f95c52bd89a73e7d22b2d9b4fa935a53bac308c32169af57a63b9735248ee346362dd773cca5fa682a74778751b0e749f1871f62fdb3ccd71b76ecb61be7c08467e4328ee6dd5653e0bf6088ba708b9021af9c46ef96a5b8ac2ceb5606da1b9d9e4209713cbc12fe306743bc4af2d500a73cef7e28305d65fd34ad2a22c72f6d134e1f87ff66c049565c9e09e2e5081283d22e383b520947db76830db840ba6ae6ecb1c5c15cd71df47e52211141301eee9b26087e9e033ce59fe5dfffd5af89eaba453d3fe4f512c9eee430932252acf9066057f499e452c7b5267acd52a1c43e177d43d0ecfc4378cbf701adae9243b9ed0db8862572ff4e27cfefb0d8ad4bb40730507678c72ef03dd46d0b534ddbe9a9a8732dcb493cf3ac9cb38d205a935b88d67688620df25d526daaac65ef23101efacdb38f7c470d28bd3bbbffb35e4e89584b69c57ddd445ec507263cfd67b4f2d00d5ee0730120ae8e1a3c2f053c8be84ae2d42b36cc68ab8789805eccc40886f97968fa830f7f36913a5fa39e02a58df3c36752e28b5eb10c7c3abaf7969302c1f47b3c4e6554cb2d3cbd67e8f0d8ec71c43108409d723f06fe291dd9eda7e046cbc7288a65ed4f8a10f0ce0a84df61fdedf9f5c8c5d4c6574", @typed={0x8, 0x20, 0x0, 0x0, @u32=0x4}, @generic="5d408cd386d05a267ca3e4cac691d896352ff5e0538d4b3cbd3fe502159d1fd36ee713d3204a941825a989bd73f2ef2baa8649f5e1a3cfdb8b369f4580f7189b04c7", @generic="18393325c3226ea0fa11fa2741900a047e9d710d1b4d76e2075bb1f4ee29e17a43175e2c0ccd544b83223b9d725b765511c384b27591ee3a19943f6b417a059b48d6da5c40a5378bd8672b00f94ab6c5c7382ecf", @typed={0x8, 0x1b, 0x0, 0x0, @u32=0x100}, @generic="d8f99c4f0f1872439fe024b672853fb4f5f8ad591b8c1d1802f24c543e7bd38f80aa07533515ad6c0ecba5d950dd844ab782fb1518387da16bacd310bc503c5dea1b1ced0162e475bc60a71f07a08527c79c332a715e030e9f43ecd9121a29918f8080230f88a664d7dde4d3c04fd2", 
@generic="004b52f520fe2a84a261f616299e0dbc21479bbd017d6371240788baceeb8dd74fda1cf330f5fc797a79a878a522e82b6bccc267f35e66ae772bcf44cb1754db9e16ce19d443d5fdcc8b6851cb2b9650c5c9b143d1730b40a80ac962d658de7c107862dee283b38790bc61a36ef652d905c8ade14fdca515250bde0a90819de48e61b33ce6a65d04f55aa9d120796b501fcadbe459baa6fd6312279e29e953c08f2005ac6f4f17e5cc7429d55516304525ae2e09944e2a0cdb2ef8006b27583f2eb58078c9193299841e7256c9518b3ff292aba7e1ad985f16e7b5b7891d8743fb592477900d2ba8716ee2b350ff91f7c952722422ea64ecf6c0a4e899b89f1e489c66200cb03cfbce54b6512c701205096c08c47dc75dff7c447003f7d1ec04db00263b1a8edbb0d7981ab9b0363cc0c9b77126fd0f18f5ee6652b8dfe88028b83e5392c119da099341f2611d614c84d17e695b64100014e3cfa105709e9a7d6499eafcd8ea99b5c9657ebc51020365c5065906aebd0fea26d63c59bc09fb191c231dc61b8201693794acd81c8ba9df74986532da57b479526c3cb02dd16d597900451ffcb7e6b3cab85eabc16586c61c05761770f3028d76bbae426a948171bf2b4976df6eaa6ab213b9d0689628518ae57b47b1f8e9b1978fe0f9901981aa3f9df4a0dd0c85d7ccf5156926621724930b7196ba30ed4f5fe3f4226846f83e50e4de903c24ddb041343b78781a62a6717eb2138c1a7be2d41e313b9ce3f30e1a21a58565e973310f3895ad61e2ed08f1dd620bb0d6ee83cfde39a0e178d26feb3a1675359d5305cf01817b4a6e69004cc7c6faf799f7bfdd5390c499a4674dcf24748a5a8f95a376dae6caa47e5bbbc5ff56eb7a3c5cbdbc521f4ca7dfd95240e2f98afbaa378a64cf4ee27ba53566e6de8a10248131789406d877ef716748f542b23975b0558004babe08dcc32233232b44169207f88ebd941a3403a0e1973e7fe4589b237a9249d75f1d39bd6e873a6d0c98528583d076901e4a52067ab6b23e2162ee0ffcde0b40610072fc6aa0751a814e5b8409e8e9f3933f527a23a64499b7e0e6d215bac488b94938988b1dec7fee73de6731b8de3935a6a0d05a915cf1f77b2dbfb4ff014675fb8d2722e5a0c6c98633fd498918c5f01fcbe926e8611654f318dc7f5ff1b192899f726f9cc15d5f385f998949c574ea1a34e98cb19db2e8d2aa3395c51494a5bc34acaa71bad8e8f151b88e11e7e7dede9b908236bceb3b717956ec3355d70cd083e39fa30ab109f79914a025f1b6872dcbfcceeda539714c29ff8bc4b98768f6d93cf538c75f9e174a76c53850a5bf2ea7764186cc0d281c012bbdc2f43b506c370605a79d98869215ab0ea16986311c2634d78aaa5733281b53eebb6422693ad5fce5ef4f1871fcb9a2884e4b48f18ed1e056e4a1ba397d064137d4a0479f55a5d84040acbbb027d3e95f8b7672ca23deba0a4b4835f5cee288d2e3cb980a9e3dc822e576d839a87416704b8fe13f06150ed262829567a792b8d5157e2e2e1681e30db9f3d2ac0f23062cde43e6d4facf97b1c080a0ed9c5d3c49ca7e01ec6235978ffe3050c675ed26156ae446cf419f28bac5925f5d4ddbc5ea9e4a7d943fc0889974131ef7eb4868d9bb118e77ed4855538ade21263e95ec1970277232d19c34fd1ab56c3468e620bf69174a4b019a418ac55d3be9f8a6df193b13376f63ac0232078623f8a3585de97a141488f74b105945899723813d4d357270b768adc15b9bbd69356757aa4ccc83c23d9a58ac3c44da310151e6ac0e238c090951cd52f413790fe5d1cc85ffd0f568bb7ca2f587707fbf90134c4b4e7ca863d59003e04c4244b855ce475366584874e9e25da5b87ed423983a27b770f57c8cf0e1b2037faf477c04e4c22b9ca1be09126e8190a48a5253abb293e138c0f066fc2f0d935ab59d3f6f14cb048ff42704b4a78d338f28ecef27ccf334a12517b159425c69c20d5a8b5ce5914b244f02643a9c6028e771fa8a681110c8c7c8ca6da4181660b8f215e73d5a85d81f36dee4118d66bb29e69bcd9d3ede9c7be5d3fdbc0780ad45b2449d452c3bb135f00122f2150af42f39eef3c19472f945903e5121e40bdbeab364f7938ebd95de722629f8e433a9904940f7da8291573611860a2a337495a2750b39d0189b5d71477431c838f843937ae0873802d2f843c1c431b927b3a9fb2e0d18462329f759efe52e94f1b638b7d6ed9beb7eea9b4221bd6bdd24b8598aa7cdec9594f3ca3fbf40c57d7eb39259635001b681ea0363704206a868135ad1e4360d04d2867d40c4376331200ec89da5fed3353df654d672c0c00e53e55555749fc429cf4009a5edb4f8f160576b3b248ff0f203774a2139ce3192501d4336a065cd37773516f4a50bb1419ee7ae117973b6d5d9b8c221eab5dfd174c2fd4a2716eb93122c04b2e5ca1accfea6a8670a45a4fb9307019bd43b7eb731d59
38da34c0deaaddddbd200a2cdc80e2c612e7592851dd123aaaf4e9a985bc98068a6aafeac2b4368b1169f28d0552cceb51f67d24623c8bf7a76ad5baba02ce5fb769a000480db5b30e1bdd6d9b9b073c30341b8a43bc860762ccc5f9c9741b312d37540220342283767c5e4fa0636d0f59d65d9cf3e7c4102edcbc84a164bd0a7a6d37336d38307e9d458d9fd824b5d9aca50a4c720501ed4f9e8280cf334015a0ae25e54a27d915cac1fe7a9a26bf89dd3299837fe5086b1fe217aa5d9a363d61a83a194d4b42f14ebdf42e19f14e1c2cd88f41080cb586ed7e50b08e14f314263aeb9e2caa1dd47b649ee6a875ea7512636173db78747ce2ac997235c296869b5c295a1898cdf2b509e38761b175abc04872db51ffa75a4a39d6d46e5df6fe553445dcfdfa3aa43e08d33ac26c184a94f41789ceb0adfe397ea9de5d7d6d91368640518735f7af46b78cf4dc0ab201434648ec299f3d4a0c1200ef7bdc58b5d7767384b33f95e3fe953f55881cbeb435c146c1aa494995e48eecf0f2e7bcb726aa99fa9e8d9d9b34a61fc91820e8f6421875bab39a67b4aab9989922535f559c9d769eaeb4a4778c4de17bdfeaa6db831f77e49d28c18d37e6a8bc4ce5ac48345a1162dccf11ac5703fe7f8238d36bc396b855815bfb892842ee5eb49b36eb2a70495f8e3074899320e493c28ef17cfe76282ee20c1f5b4ba7709fcbe6db3fcc6c5fa400cf790ecbcdfc18883b394a48d45b287a401d700651fcfcbc7385b905d6955652c54106a49af4eafe5cd8e805ec6fa32a99765cb2d966d0420e8bfcfd1652c0caf180e91c961046ae47e7e411dad0801ab7a6015c8a7f6877f963eadb88072786f76cea7a063edd706ea26bad209ed561d56a30d8d83ec5021023391ee5ac1985e0c46b0fe71be253bfc31ebda3df0c372a7544f60a50c9dbe670905ff228cdb8694d65c088172434398f9f89b817ba5b24cc07444247baeffb1bd412e711a818cfac48098636492fa9f5062e81e7154a90ef408913989fd43f434dd90aca158db1da88cea5e2c6836a74e38034cccb64801db41815b1b3d712a4921b145b6246bb11ea2d4a121ac68b60b87265efdf89193a88f0d36849b2ce789af272fd9873bd415c2e0a8006fdfa8fb2918517ad63085c3e84f54b82ffe160ff22f17a9c5d487df1c151b01f562c839e606e8a3e493f6e6395a608560934ce83404b34f928e8ba6b1b4011d119fd9524335e5369173e51a72440eaad3b775cf7afe30d9cdfac79a849ac02f1212a6ac82d366f7ada23df0de9a9b5796fb1410994fe0556322ec9cc0c730f3cd48df7c3d6a8de425ce5b35977c17783008cef67160d3cdae9e108ba9c73a7cfc79eaf519569b8cbe1626590c6f6872920dc512ba0c87c7592e82c13e6a8ecb0c0cd2585664eb57dbfe5668542674f0519d2b0017106d36b8008f79a01f991819e7dee37a192f8f88b184859d0c5f65ef072d3950293d8bc9eaac5919cff52ea82862a4c31aac87ef46f4669af0c69349234d5aa22eae4a5ddd781ef5f69990e11f16e5f754b205c987ca3c99931f18bf6216c500ab0128b0db24e2a3181ce9091428f1767ab63af493658b0cd084cce44afa6ab0ac8b4b62e6834f632293bd90a145f62d41f96282656093c71a08c550fad2767de8ff72a6c5b1cfba3f9f053b157a562e4658e100de82026567beb77263878d34bedc05549c9d02d6fb6aee7e1a2f89c97b4d2aea05411e192db38b9efead7ce3731733eacb5cd6587e2e8dad8f3eb1ecf7fc4a3a0183d222e4772b897416d85779f1c7de6fd4b270d598b733ea97239b998c12e904c103bec49a4718cba42011ec33b01b8375051e0071f15f8e578f226e9b9235d7dfb4ea5259f2729c5c40346fbc04620def8051f81d2ec9f171865eeefb64925fb9b9a05111d6020b6838973d0ad6717ba56d103ab4cb80766d9eec5ea8fab94383b4f0d99db716f09ed818a5543e76c8a9286bb4d1f3ab8c4624820227617be7a96c9fb8dac2bba6b0372544ebbf370a447ca9e21bfd281834b87c658be4069daceb291bc5009988fdaf5ae195ea708b8534b66685d0564cb6cac21af46e7839b71047e4a63ea6f3e9a988f9541b25b29fc5b9da5dc69e44ab2f4f4a6bc5d628bb3a71f3d5c489c2a0d700cd4d5f6d279e439a52d8447468f05342e45e16963aa56c2f874dc8bf02e008846f663bb1fa8f6bcbf84097c102c0bd76ac78f2c641dd8274877204fe25a0129caa36f5c0b7991d1e781e66fe45f4b3a420c79803b97543a7c5e96eccf5f18997cb25b36521d859d9396a2bbde0eee351c6cc9388f501427a6fa0a4d3adafbdc829ea72e4659c9d83e50845df789461f2c52d44846a2e431931481b17f9e7e853314c843ff3069f633848bce2d18ca78cd74ab4460146de10c8fbdb5f3edd34647d8e3bbf2a7f4f034604ce14c1797329111c897dd51791043a32691b51b666774e655755fc891b32daa136dfebea17eb81
5cd780854567e0c19fadc070bb28174fbd128b88f7757a10f910da0418ce1e59ae4e3ef02903ab590a4ba7fbf524121feab2b3333d7b679a75cce3ee8caa83d19a5141f218a9987f83de966e7fa4318edf62bd23173501a21520943dd68d0a99f471f6438abb0296a4a8da141d4c2c95e7a607d76be5183f7c1d5863198d2223de0fb72725971d748182f66201ffcf8476e716fefb2043eb60ff322a3dd22c63bb1f49c31a87efdcd87be13cb8c9d1afca905173eb7f3c3660120c888046d74e96dee3f8ada6f1740e4bc74a0923e496aebc30eb6e5619c5947b51cdddc9930c78477f43de7029a1fd1abfb39992197e4f935127fd8346d52edef6617539689eb0e03fff6928d30d9d5ddc8f230f5cd64093e1c502ee4afcbec7c5f529388ea333c3af387321913f119399d8b373b0b4431edac324df6a312960975dc5b83f078e63ec6a4484600dd81cbfcd9814a3d8edbf03e1b8d183f5452123419eceee9d53b68a795d8929d6679ee23415d75b57f862989487240ddbda1811ca6860318aac7ee0ad439b6c65083c20396c9dfd885e8f0a4792f08f5e0b4285b62077ac48763d28f3806fa7b6f05f25d33dd97c63587cd7e2e139ff0e42d6c0d6f05dea9847cd49c6b6934426a42d07fe111711fd01fb00bc5186df5ba0cc2cb52e5beab02a36b2322ee9c5b9696728018ba5f37d7e5cad1be0615632e6dd9353148c5846c2d3fe806a16b58ef045d8de4ac96ab523efd4459fcc611cc97f2f6937afe7a78b86", @typed={0x4, 0x6e}, @generic="2f877767950e2655134cdd84cc3dc0f4f154281a9d9a44b24ec43c07a45ee3607585775e0143b707aae48396ffaf6d454948999323a30692df69b61eacafde3e81efd89a23e741a2fb0233300aa53752c198337d1726821307ef7b7f1c3d60b21e29ab35e231ec2bace3f06f97daa08123f02b70133da2b1045b52058caa346040298112bcd6849dfc61477271d54fc0619de4cfa3b61debde231cf3d23f4fa5b4accfd3c144fd91208b343af00cb92326536ffa60dd962f2db5fedf61984034ad5272d426b99c9266d3b808af9dc820c8a2d03e2eadc8194c1de53c22d61447eaffa74c15a0fcce5824211701"]}, @nested={0x70, 0x5a, 0x0, 0x1, [@typed={0x14, 0x79, 0x0, 0x0, @ipv6=@remote}, @typed={0x8, 0x33, 0x0, 0x0, @uid}, @generic="4080318b24064b55128ad517f5ecc6bae86c92889e42a174d227f88740e266283513ed4ccaff12da7b351e2995058ef1b130e0eaa22a78d459868f2c8d21ad96cc74396e", @typed={0xc, 0x51, 0x0, 0x0, @u64=0x100000001}]}, @typed={0x8, 0x15, 0x0, 0x0, @uid}, @nested={0xfc, 0x88, 0x0, 0x1, [@generic="4c8359ee1e4d9ba13f97e3f2fba9c0b40709a96569bdfae77e55938113052fcbdae5e87109398752f6d8b99b6fdce05873617fe9fb7dc3006ff50e6fbb05e4cbe73aaa389dae8a2bc29a57f7358ac0adf4130f91d887b5239ebdb5d4722acc5279f78c9365cc66ce84b3bffa93f66ca4d329c58340b0623f3fef100e01059c84d7d11b7f53c64a8f55ce312bb07e909925a1240b95ea1544b77138835711bc0691d6c414801fe51a077d9404914efea4e70f5ce18e7ccf5937f7c1a30b450c5156546548cd8bf8c1bfe48b6414067e47ec5f0849", @typed={0x8, 0x43, 0x0, 0x0, @pid}, @typed={0x10, 0x5f, 0x0, 0x0, @binary="91f2f6fbb55d6dbe436e55f8"}, @typed={0xc, 0x7d, 0x0, 0x0, @u64=0xfffffffffffffffb}]}, @nested={0x1014, 0x36, 0x0, 0x1, [@typed={0x8, 0x56, 0x0, 0x0, @u32=0x8ae}, @typed={0x5, 0xc, 0x0, 0x0, @str='\x00'}, 
@generic="af8c54b2fb5bef1d5df7d7093eb07a1fbdaac1505a52fc525bcb6a775004eecc7ef71d3457318078a020c6c6d7dd3fece43027ff3f1517c461ca5f81f47db3dfbec592519306ae7c8aafcf81b91223ebb2c25717f0e26619886b6edc358bdb8df97b575a93fe1189543790d029b2ef2c70c12da95846757a45d23c5b83f6897e1ba04a72e455344376d04e6e1be855e0a9b81b85431260ee0f3171e77d5e8b2ce039d5e61798942033cd82300763628294f0e269fc726cdfaade7f890c6838538da2595bf209d531938be8d974e3eac3a60bb0aa0e61471e80f89ed8d3d4aa9af6582dec208f254dbf6b960674fe2db6d390b6f37f99b0f9bcb6b66c5575490ed1491b1ca3513ab28c80fc55e912fa9181cc0732564798e6d075c5cedd8b34d40ddbf85cf29ae9df4557752b2c570d9832e8b4181cd68713f9b425f692b1c2275dee6aae5136eed160d1a740d967de33c9b942565202fbbdec45b5b34ca14cce245071e142b27dbd11bdf62f89299eea1ed2020d7d32bcc65dc6615d22a2a97bc0d58d9478ddfa3575d9c9a9ff56204e015b51acb12444455278d6ebf3f2b9715ffc7e651498e1911abfaf0181a200af387624eb2ace09cca48bdf3e37983a26961510c88304ef00e33b2c7e2df47894341c1f28b5290097d59ed0f42aca384f0b7397a86ccd46d38394ff1d1fbf34b2e4b76a7a2c2e391eb8fb8633ae0d278b109d4a88eb8c1b96f105e1b87c73eefe1a623c121759afa771d0ee514efb87255689b0f1a61b4101581b26ece2ab0d5492568f123f10a7ceef7e47ff659efdc096d005771d2e8bb5a52afca0bd1eb2da634a322636f07dcb95fa4706cd06dc8dc01e6b3eb2411f59b4c34ff464c7c00979e5a0cc333f05d9897c34463378f51de23b075a3ffeaa5ce6238c383a618f8dace7d4cda990a25bffd55abe26b336de1394ace4b35fa1cef521f98ffd16de6c34b7146e487ab269fc899ce58c5c2ff92b8fd1b8ea3b585d04345c08f126c5aae8c3be680ff97e35d32096af74b77293357e8e047dea6d7767240decdc80abe10727f9dabaccbc5504f47b7c7baaec14ecb99d8213c04e6deb3a2c8d971190bb7cf30646fa576cc574718fee21b4c67ca173be2f1bed8b423259aa623539bf2e300734be53ed3ac1faf6f3fac08d4503acc043afa2655e673a8685bbfd9354d1504cc48411ce28476b5b965115072c83237f67d57870a3094b92503118c7985d5ac850c2864a28f0ce7a922c7f989bcde4714293661c1f4a30fea7619d4010270696542331c612a4100c1a1feafc89ed7c169626049861022da615342604e7ff9756eafee6af1c8d14abd445304ae502bda6985621c538c326f2b8250e2b474ecc2d9d4feaa601b62466927eab9aa2867f76573746092ef30f9dc99606940674ee862a3f28c230bff03e1000df96f20ade868c0d739c5dd615b4f5fc1f7b5a248b11a5c7501c8df2f8ca796426d13726d08cbc8a4aea585000a1efbde29948fa99c09c74581c8461e0034dd52d6d89cba1c8afa08c99c0be55f91313baf43f2abb173a6fdedb831663c0ba33c9241e656e9bd4af112c651a4d4c1413c4040547e660917e2f271814da3ae39f2ca0edefe70ab9f6dec84c800bf56675d88cec2e23748482a30eeb0ec977fd0cff9b00a6f80c0e7152bc7bb7ce0cacd73e9dd6ed37ae3dcd178163f7fc74ade33b0292a71d03959c400609d151649f380df1545ee85e73cc96269ec13f5b38eb9d0449298ad388dc4d9503e8dab09fcce03a284e6d544301c7e8652dc9695bef4a3c721a089b6ade9fc99125e0eacd0d17bd0326c1f2f4d48825055a60b19982594e7585b45cc98f03a97a912754d36a8aec7c20ea8ff5cb83c9202b7097516475552d7d7827c41a5ec694ec85eeef2a55d84f91f29ba36a6c48eec5b2d1758ebea7092ce358994e2f2c496f508f4e514559e09b5975e2cf077df2d98bdd3a35c78a81a7baa58fad6cba694b6d849d19d8ebce1ef61bc90ce418894776fff026021e9099ee672427e7017625954e33fba0546c51f3bc911f407b04e713655ec2df6a20fa6c9e83b53dba8baf894ca5c5abd360ec6f6ec51ce768f6d74be4911ed1a8be38d02af1dae8b571cebfe7bcd62bae2c4fbfdef7baa2603914f4f4d089c86a27a465047b489928ea92d3de9e82f4313c0a71f29a518dbc57a6d0b430fe58fcc0c0cc5a491699c1e92c4c18d6a8ada09d42b351acb436045f9ef7b9d37f052d72fb9642c63bd5b1bd2fc83a2e57f97bc91c04a367eddc8e85842a95efcec147e32d177ea91606ca1f0479a8ce940a18cc943e8920ea6abd1f423acaf375237d80b12715ff2cba5fcef37b7b9a1dcf10851a685719e8c9f01b494ebb59ca8761b670e6a6bb56a5a5d7df408a12aa1b42484bb029d602036b18d9e64f82fc20beb9dbedc86ca24ca6955234277822c6e4b1075afd1db3dc56ce65e96decd26698c347f72965a3e433e901cdb25969a
bbdd6e2ad55ac94e8d4c43a46bdf4bb722c54245bce504a761e180820a1cc0e269f575f52a3d192c9fd20e70780c2d98486d85a0d078fdb57978ea80a70ed3defac5ba81e3f26df3c38822987850374acf6ca27153b2ef94939ed3a4f9786b4b92d7ce050d9438e233bdf8698242a78936c96d2b86e8206b76eb4656ef8a3068cc46ea5200c53313c0c247fb5122a27e3809d81f108182393e54e47960e347e619f9d6a411de183f6da8a3f8bcce00f72fc8efe412d7178ae5ddda1383de6afa3d6f2a90b8911289e946a89b0b619d051246f2ca34a77062e4cdb686834a1c75600591351cf9773203f7f202f6f5d40ad478c6cadeaaa896d45a13cea8787c4be35b6b963c13649bc6e7b6001bdd85c873c57c549d17c94965efa598879b89410df7e48e1eded91c57b052b6e7f63ab794799c3fed6580f4c55acaa33ad466dace47348c8d32376444d6fd98c9b8fbee5aee52bd30f190f95ae064210445c2f22d8468a050df6d05a834ee12e20204f1ee1d2e2e5cb22ddb70ecdb26537a61ea9d07707c552bf10c30e15851a9ef7fc034c843b006922ecf3253871978cbbd8c880c250d29dbc4f27f38446006a8d39abd49b33ac0759b634b3095379b923d98b3c1992a1a91a4e0633e20737270ca7f787040a27b5a018f2be98e4327f4b24e81eb9adb5c434e2c34e8e16325ca9c544d6b1812850fc51d2ee33fb9cc98c2c503262f4c4c6d2747a898feff843520e2675a59c601e17dbef934dbcea1368f49fd08708ee1fd37f00ae37e0a80b76d853c9bb6b104d8f3a375acd3952e6a12a902a4ff4c01c8935651bbb96182080ad056f2c6a722e7608aa2d4f3c8be6053a7cbc90928080a402e359e84dff0400cf5c77fda25855576cee728b6f83fa2f47ee75412485b65decdad7f5d53327102ba5822a840e542e16a751b71ff507dce6a85ef5a37a26495f25754fc0f69b0067cf92a8d88c01970689152dd4dd871e476728b4eb0df0967df11339a277a332b746210ed10124afd1bdba995100db2d996c652d24167ae6dc24173d4df75da62fa4869faab5cee7c7c81295252467db0a65e141f29f0ad4f22c5d2dbec77cd9ff29df30a321353e81d1582bc72ddff3f6e4191eb1267147b570591dfe7203e4ba180efc2b52662602cbdb17a01449d2ccd9acdc0c3ba8c1b079b77bfca8af264e721c97fcde6ba6f77b0757a51d9fe43ee2bc298f2ba8783376258d50fa64438c77d481da64fc143b61deb271c6b79910afdabfdf8f46e71de8f49df9eee7a5a88e88393ffadec6a941878229827a94bd1018cbf25832ca7264162b43bb84c6c1958e8ac8b39436de0d2cb7600453cb2340161dade582d1f289d7f75cf4fed63d29390a9ab62fcd4d8bbfc69ed91ec2dc87bd29d324c13473e0d240b3f0f7d731e5f1a02d8d3867da025c88d5dab2356fb067d67248721d0fbb4c58968fb2a187df0014eefaf8c009a635b1a778eb56643b4371d24c3b8445b5b46062ef3a87ea7de8234666c6893a73083d4f7dbe27cdd8d1bbb2ef8648e9515af24afdbd0e16a38a2e055d37f9949facb217c635dc7780d4105bc2b1df20810af650e86fad0bb4885989ae428467c7c4e178646358e25dfedc70fa377c14ea99be5a7c3c8fecda6a46d02d1f801e77b52df0e08f1236d3de7cf95b84132cc4fc1d9a490227992d3c823d20cf3a75a0b353b72ac0bf437d26210b4858971937f579b6650522279b3eb875033cefc06415467a572345c4b36a5d8f51141ba8258194fac6ea49675fb142372752d8dcc497f135bbccde8090db941b54d6978c4f511d7f71db190502583f6273e078a724e920361daef908af93ec22de9175b21ce0d8294aaaa29c86491473f6fc0d41004e05603d1dd3f34fab0c39c14da2ad51b15635ad24eecc53777c7180d74e10fbe67f17b2d8c873f8e47dcd10312a25f2cd6b7994a0340656b17831910132b178acd2242fdb659224d3397ffbf5f8f4c1e4c2547c491d71f05a2a2fd6203a47f00139d2a44e096999b7768278eed0f9d5f3b2d6a006de85526ada5b65017b8f331ca23a392e410a5656ccaee69802b19a2e044d1b6d4d3617ff0185085d93f9f8e967406efe15d1025a1d48bbea59d7b4020e3462fdcbd44decaa586d17bd5d2fa7aa17745aa7597f2aa49a6af2c53cf2efdbadb9828e730e1aa447b08ab5a4b807b222d23cfe5a1b1f0624e545d2017e1b451435f3ec2241b92ea351acde0171e37fb1470d872a5aaf1c38342b6fee134002f657154eb9e4efd99a712827a50ad98630e7e2939e332ae9c5e39ee70a5d547d0fc9535fa7740dd278c6d03ad15ba374e364ea046f49c783c56d1546e766d90fc15eb7c6e37a4719a045842f6cfdff343f13a317f542ac0c19b61f0a8ba1e34c3690b365acf3c7bdd4ba72836fa6870b780b7c7d5f5962c69d0c4422fc992a6d8aaae739f592afe07af06a00c9757ca669b0744faa35f36ffbbd84727e92b643e37dfeeaf03b49
2b4b673c09d18ce2f3efc474339eb2cae605dd8144825a4d42f41a9cf4d4960268c469a46765f045a6056d78c50c9a2f25eda048a4ec9a32bd1727f4649999d3950a61d4053716c4969c387ab89f8d9f497fb76d55048206dd459b9174e6db9ce7448b0c0734850475bdc23595de94790b2d7109b577f72758f80b01e896f7738d5ca55b450bf37d8b9abdc35b0e3b612fb00a3b8357b133cdbf8e07878bb85e3b7ac27b5646366c394ddf32aa158e8780daeee832811d5434e16401706a6dbf1e8cf1052f057221fafe0f59265e907195cd05e497753d11019f183f83d410d294589e3ff69ba96c4e9d660f795ec9cc2edb70e85fc1ae38d022b2cee640045f23451cf7cc6280cc89e732866783a7b2cca3393835dec53eb893f5a2f985098c254b075810779ffc877b235ba64a2b62b04e5d6c560293f593c273baafdc0b9bb42e4a34419cd501722c153dc667f4b71c50f4bbacba3a25f9f899034f058b37f8b79775ca84ac2d1e529f43ebf3107f3751365049f92361b22f3f51a9a3f939c1bbb826ae6e7ea9492b340a5b03b5fd9872a1785dc3273892c08eaae69dc86904815de93cf53a30e515cc1531e13ad5966fa531696a86386290622deb8f056822f203158d3d5aa4851bc4f8a0a4955684022ad80f8c59ef30c4526e009b6b63ea5c9e40b6f319b1f466891daecd9b9497026de4275c74b82ead542df3f4a5e26d73609f545dc0e278e4c6c9c328ed1079d96426836e45cc021a9a98325b4b648686"]}, @nested={0x58, 0x63, 0x0, 0x1, [@typed={0x8, 0x70, 0x0, 0x0, @uid}, @typed={0x8, 0x55, 0x0, 0x0, @uid}, @generic="603dc830ec7d42a2a043ed4bf0c050438d1999ae4e69bedc634029970dd312e09d5b50afd003e8b8", @typed={0x8, 0x1f, 0x0, 0x0, @uid}, @typed={0x14, 0x1e, 0x0, 0x0, @ipv6=@ipv4={'\x00', '\xff\xff', @remote}}]}]}, 0x3498}, {&(0x7f0000003c00)={0x4f0, 0x3a, 0x10, 0x70bd25, 0x25dfdbff, "", [@typed={0x14, 0x6c, 0x0, 0x0, @ipv6=@private1}, @typed={0x7d, 0x1d, 0x0, 0x0, @binary="48bdc976abbf150d7d9a1bd6b5788289490c41f0d1bc0c86025714445a231a1f397df36dd120b081afb3a1c7dcd4fa1a4c5a7856c18e28d12c7d39cacd5e2915bbc1e44bc423361f5a298be1952dec43962a6baafd6bd9ef7ee9862ab54ebc1bf4f64d3858cda19e33514ba2652948e9fa59817d1381a601f3"}, @generic="c3a6e43e48f963259391e947f6e0152c0553d93a9c70fa4f1376e83475df4842ca4f21b546a17a7c0e7540a2", @typed={0x8, 0x15, 0x0, 0x0, @uid}, @generic="e469e04a77c06bddf70f1c25d09499ce91a3dc77e977b9255d93b06ca064d92a41839be931943dd43151cb4863f1892d3311d2ad7bd8da3c56d85b7675c55a478ff311614da1c977260b2a979062b9d7e409ae1f1a3c3bd776aefd473eefcb1b38088996be4ab2e7482c47c1f130fe14d7f3a906e1ec7f3ce89654d4fa6c7eb4ff5ce43f965fbd1e409fc3ca7fa5f5f7e37f7bf1e80d20ab9eb065812aa0bb8691f01c9a14df46ab6791e6f91c7760a60b9cbb0349f2b9086e47992d0cf9e68d702b4e1709", @nested={0x350, 0x4c, 0x0, 0x1, [@typed={0x4, 0x3}, @typed={0x8, 0x8b, 0x0, 0x0, @fd}, @typed={0xc, 0x1e, 0x0, 0x0, @u64=0x8000000000000000}, @typed={0x16, 0xe, 0x0, 0x0, @binary="91e2466809ba9981ae2983788f925a0f7db5"}, @generic="53d7c017b6aa9bbca8fa3bb0ec1fc26e28a9be73babc2047b5d0fe9f6053d907c2dc0c402a39af8e1affd55d8ea112656af6b2d8c8365ea81375332330e2f3594693a7b5afc6a0cd60d5ed469a0e1fce3ddb32d7aa3318cfbaf0d69d81a5ec23046053767243c46c5d2e1a5c8ea3a4e24845bce1265758ccf85a3d32e16c9b5b191a580b283edf3f6bdc09d7a8a83333e8c7f581b932015f221f99d2b7338494d0bda53a01ed45073c70fe9f", @generic="452e09845665c68d8139e15624e5b0a6c15992816259d844ac7923a592c7cf224dbb1a80210d99aa62ae03041a6714ebd76005e05394bd71bf7ff5f34a10d0b9becccad51632f6790882600ac59a09b6e6c8fedb646ecf437c8fe22e165ac7f70c8aa0bcda453ac84c67edee53f0fee21ffaab88b5784f6ed83118204b0b1a1d2fcca745ed8ba1fa1a6c110ff1825e36ab33fb998e50b2ae18ce7109e390d047cc848817429ee15bdea19226b88ffeffa8fb70ed0485260bf30c11c83113f66b466a935464d7c0e58de4833a45f41e055db92d4f6a00409493a46f093b79205c66765123e2a2e6889227b40851989ad83248c0ba9e879a1e819c0bb5eda7b2", @typed={0x8, 0x59, 0x0, 0x0, @uid}, 
@generic="00474dd4ad2dd791bca50f506248ea829efc6eb88a58e1b48b8ce44683f3c6b73c24568c636b1d4a9e48740ed165d1302e6b9dfe4910886781582a554f8f437b1f15eaf0b9df5c54304b8dbbd256555878e8f33315d59e8924b8b7013eb83a5da4cbe96ca1d4b6a90ca5414833631a52b1df2898101affa7fbdd7f37e8579e05f43ab1b38bfdb27a19", @generic="e5d4b7742821aac12ccd2f7ec93fd67d9ba3459fceed399e04d9dd4213d7526c2752bae4a8c1a66ec1c40bb02985edfd2d2277d47e9e874eff8db9e1c9e706846f546b0617a15fe9a809d793391e6c79a2fa1e06d2a842b3e96814faa35fb768160d7a9764d1a06ad97172a9369393d5a3649c468ccd3d04ab3140da9e640ddd723af9f2af57a01acc086b097228416a697ca1f5aadcdcae236761b85d34bf182f4be847706e26b6ae8304f002d1cd08de8e0a0b321cb0a43bb90e27e306576f83591399e0e5a26c299a36803ae0f24f1564d7b8b2512ca0ac387a3051f3da04"]}]}, 0x4f0}, {&(0x7f0000004100)={0x3650, 0x31, 0x8, 0x70bd2d, 0x25dfdbfe, "", [@nested={0x18, 0x77, 0x0, 0x1, [@typed={0x8, 0x99, 0x0, 0x0, @pid}, @typed={0xc, 0x28, 0x0, 0x0, @u64=0x10001}]}, @generic="bc44a21cc9d568655f2098a2bb119cab63281db2b86beefa56c94f77b71a12ba23096539fe2b1160a5b40421557b8fcd8aa25f606487fb7f1d8bdd9477eb3aa3bd2cfeda4037b8001d4cb2cbe92e6b85cebc696b97a0", @typed={0x8, 0x8c, 0x0, 0x0, @u32=0x6}, @typed={0x8, 0x95, 0x0, 0x0, @ipv4=@dev={0xac, 0x14, 0x14, 0x12}}, @nested={0x18a, 0x94, 0x0, 0x1, [@typed={0x8, 0xd, 0x0, 0x0, @ipv4=@empty}, @generic="13e12eba5099012bf22649315497ffe675b52109b7f88ded06bd36395ef782107481c1cad9b738ef7619862e617d7a6972d9836e9af8a06c8fa2e221e6cd462e4cca23db6d9a924038c3a2de3559ff3c2904d72b79fcef9820fd9091749a429804eace74565bbc9bd3d5745aedd582258d4e859040999f81e0333c7b8b7368db5ca011d4239f6d8cb5577f0802866fe0161b018a7c44e698dbfaf98700dd43cae04edf5209fa32bda2a4dd0f87b21f6ba340c3f99e58829c6328aa2a2cfcb70b379ea49bb258a310356cd541d9bf02a6a10452d9953c95cf4ac5f00d27a1ffc1722c", @generic="d6e9d21313086023ffda876a586f756718c46370842b8a1434edf43560c2cf15e40a9267349fbda286a588926b81498758040a9e1b67b49ad138f5915191cf2d1e59464a3b7c4e69a9dfca80e3a8", @typed={0x14, 0x5f, 0x0, 0x0, @ipv6=@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, @typed={0x7, 0x3f, 0x0, 0x0, @str='#! 
'}, @generic="6d8f0507d0137a18b5f2f39930b7ee7b3077d8b2761fea14d8944ecdd39f034260198ffb348dacf4658d032519f00bfc9f6e"]}, @generic="ca7a7f3ea01c144754cb0a0fa6e5a1d7ba82b2e6378bc40f8027db8b402c706b30dc8f3b3d62b0e9046658fac263f0b45e4f77598e8ec99b714199793cd958a7cbc6f401641d6b58364656c3f44e735d8a87f945adfdfc6a54ca91af4f50f3542a290b52b360672630fc9ac966b65db67268c5abfa59a4f53504ed525368209c8205ca4bedee1282138db844509061249e0b544210097c8258b507c369ab8e31a00f955571bc8d23b600a405", @nested={0x1346, 0x10, 0x0, 0x1, [@typed={0x8, 0x53, 0x0, 0x0, @u32}, @typed={0x4, 0x6a}, @generic="bd290633c339c3490a89f380a56c0f85e86f669f7ddd204d378cd0dd4821c50c3ad0c8eaaba73323c70b7e8f8cc98f6a3a6649b3378c1bceb147c56cede73722c1659c4fdfa943b6efdbb123608f9b3bf923ca23602a00fda577d1d4414d7e5d5ac114959638640f4c25f97f1bdd4f2bb2db39fa1d0c9523573a5cdcc1bd7c2c5ae5c44faaa6f3668d0e6825c418fef5d10e78472a4aeddb2b074e1d94ae467839f6cd86085b3ac6f91be4179e9a67eed45c70eeb691cbc7af028257258db61dc5e55c5486fd366577cb57e8b10ecf5cedf19d92b3707e3ec7b09417e4e8686804d68ed8a2209fa26c6d89df19be0901d8698f970631eb32ceebceafce7dcd350f22fdf5850afaed68cca4c3b45f438773162d11d1dea178e4dcc2e912f7fedfb206c9ace9b8596fb7e06c9b5e2cca3c85978cf1dcf38ec230b793032a1c54b7148e6c1204cd866d73f028a67b1f55c727c935afc9f85e35a9890c6912dcc01313ac9aea0e7403954b734a71d783ae98ed55f5a2428a9ef113713eea8a187cbdd3f8822334c9d70fa295db79f1081148366a8caaf0826076187445f4e9d49825d15c4ea4b8967bc96a6744b1cd7db2add134b93077bdba893d53a972e0390ba9884d9949475e5d91bbb0679fc27dc4fd7488daabc8e387bd5ebd7954e51b6399ee9c41ee86089ba84a1964f47e0b186403cd3d4535599207671768e3840ef40dee2bbcf7b8a3b4f0fc19d9bcc151905bee53738e56aaa80542abdcad3f37b7f98f367fd99f037eb00d394e7e7735adfe83b895d7442b01c71d0f8fd234e99d1a07b82d39b284ac45a2eb51af0e911691aa76ac436c3493cc6cae121c68b80b12f02efcf8e98d4169bf9d0ab1d62161a8837a4b18c72fc23a211c5193939c8fe869b640a04975ad65fc9101cbb31983b7231dabb9af509789566801c860282bd3add3b96c5334385052ca97a684850f92499b80afceb89421403c3b86ff4cdb0d6ee5ca4f69dbcc06af36d2c54f2f65bd84c148b656dc81b4cc188d285c3eaa1ee339d88551bb4915cbc7e33e6de31134a766e36b3973e3c7af9937f8a2c584525bd15445a1701f8b234b2e067eebfdc96a5f197389836a63f37f71fe4a786f55414ba7f98b8c1020d199948ebc0e94a5eb2cc9334a1e424561a2513366df9db2d5129038f7e3ce6d35a7efa9913eeb76839650346b9eb67b31719b202cdcc725b3a76b324e6722fc991071c67cac407f291e3fced78b06264681861ca9d50113af4d2735d1219e791a8eb8afbf5dd9157ff6a81d94d737de1d90942be707d19745902faf8736bd0b7a7e0e7ce7b5f0539ce3828364b26ae2da5e34a9824499233ee4868e24d14ee0948bae130aade4c96364880cd907cddf78ed205cddcced38b607e47d802eeb234aa7917e306e86e08870e3c907b231618911bccd120509e290f60a9b02b43c4e2ef44b0575557255861502cb035bfa5cdfccbd0f408a95851603a861d1e61526145a1f337c0e76e3de1cf66fdc134a6fde4aed7aaa1fa782412c1b4f3ce6c78d5fc596cf3e7f44a1801257c5928aeb35849d8a73b7864cd9a1408e08d7208fb3950a2e5d71bd37b6782735c2dcf76d64a876beb1fe34a5cfb6a9f8dfd1553b13609500fd5157d385cb45246b61d2e9bc89aa61ed985c77e1cdad2f5ba1411ddaca6e40a3bdfd997b0ef2d49ce41b0329b98c9a7735f3c8e18198dc6c10a61b297fe4100acbe3ed1c9d8afd4ebde70c70fde51e41b099954e7fa305d282876dca7079b9d4766183115662c094cd18b7418b4b6ff2a75c4134b342dc37fd4003842c20b740532298537e10a5d95496d67d8a6a7a4a515162e03b51646ba7cf61a993b1910d3ea54719475fbafe3d539be13ea2827c71272d591db6a1482b25281637c1885a5ebb534fe8249754c21fad772697be3994c3d2a072d557f936b2160e80b3a4085fb54fb639bbb3178c42848232060978d6e01278011f20c14fec183efe3c047bbf92d991fc7fdd42fd149b9db6b1c867ddef09a8975b7e1c4f2f401e637953f32b83d2fb781ee19bf957bc4e4b668f6c8084224d237a24220306179d3a2b5317247572f200c
066b3f716cc62ab0501b9dfccf79d69c4bfce705c6126feb594dd9e8aaf154428c09aa9ba668a5a05a5a0dc48f9c9a1851148cb8b2331c09ac857a898afdd1736c540e96822dde260ca3a85ec45d738059fcaaba2766b08387d84f8361ac545c35e3e7374afda97b40e6fd92cdefe212cdc3a2d2877e589d020b3eaf807b5c90180a9fa5d87b127f38f9b60679ee445f3c09a09ff6ddc6b729c348f061859b574a15a54300ff5db3ec2e492f53b66bae11c85fea16899dc513986864cc4f094f78ffdd4995bf689ecb6af9a67141abd17cf1929c5a1fb634c8007e622908abbef2110a63b64c5bceeed0b79775d743b0626b8bc880a515879c30029b73a9a74593c0019716bbf14f9dd514bca45299bf89f3709332a2cf7ac035310b352c3e28e3021a15918ead15fa22d3a156b7cc2748361dbc69530f99aa9629e76c407bb7621f663ceb5ddbe5b12fe4b71793075f4a3d6e60591a2c2f26a961e7308e36135535db1b713735126792e1e8e849a694a95e0c6e03e2053508985d05c3d3460e1c0aa52e56a0330f9c2c80f2b2cbbf8a5387ee9fee1b3d2de34e2bdf9bb1cf5eacd1430e07cf846e5bced85f1e753aa1adf482ab6d9a2a76d4b0189da0f83c674e059d89983a73b5eceabadb3a5e1a7363fba23fcaf92b81247146aeeb6fd3f556c9238d63cc892218615cab9c2025f13e857c2bc4ae57860f74dc1c92d962fef1db3fb410b3d5762f199e03c303bba951a8bed31b069fc19749de4245b41a9783b9d4ca5d0e0debf13e0421a5ce91f60aa7f50e1787c642453cdb10c11e39aa4107db99815e196880729e7c0c3e781df9825fc7a6cef0912723774be637af1dba89bedb3673da8c4ac4a63151072a82103c3b912de1129e60418b8cda3c8c229e53f9b2c6b9826f9f166cec925dfb9722a8c3576162a9df9506a60be0844f2bee621e60cdcc3139d19bace03d0f32194b579b7a74b8531c46ab5249fb72d17085852bcda907057b5dc242d67d930afa3af0d824f7e0c775c11b8a3b5696fe328df3a17abaed68647aea87c08d3fbf1ffef7692a6b3a83a2e56161ce2127877006e3d88d1b125233b458a42fc30e7784b741b5564dae78e91e878e6a52dc4334993ef2549283955b2c80e05f83f185878916463eda0332d5ec6fa03d3a417d9417150bab5a7356b5130cc450f814ceac15dcc06c3e96e5a5a8931e1215b3841bee0b297683a80e8e664bd01b9e7fe729113b2ce18df5ee9ab17bdfeb5d9f200d842a51dd2fdb071cb133e2d244a663d468bc006315c6533d05c8fdc92d1e155db688d5aea53d586b0f79bdbdaef7e399594cc2777af9eb67e33372dd6e7322975f16630cb1a9cacaaf28943a8392078ade91e33fbfbe4c1e03bac816a9b3912704d02f2ed53ae05392520e4d07061151ba6c938f85e80b5ba296ca13825c8e6be141f560e9ef98ef61cdb080436bd92564998f3617458926e3cce65517ff65c59a54cb5d80b16455d3c962b8c86d5fa7204b6ecd5dce04c9cc732fdbbf67e6dc50bca3ba0b53255b47491b3c5d6042ac298330d049a1f95629ef19673a6b03b0875580e110041d04cf4696d115d15cd5c4acde347d136995d8448e25dfa9076ae2e5f579ff9be1efc8c5ad75a9ff853ab579387964f61d5c499c54b3f08e50a4dc345581b12f3ee3afa4350a07953c0ed016508c7d95ce7f8e76b593e54e05ae131827f8eb5576263c9974d3d498f532c39aa790fdc4482b0c3ab56e771fc4a5dec854a9e9421ddb2470976a74955ac3cfc82780918f1a856a485dfed15631ffb3d8cd614bc721ece74f86f722b475c9c38ccc6a194cfbef8ac407bbf314ec5eba9057384d2cb0e6f936a363e822f990b66279c33e483725715e04b94fc69fddaad1b619bac6227fa44443bd0b7e77e5c3b9d70ee48f060abca9074c53e4d0598ff2105be2abe5bdbece1eec65a1ffcaf24cabce8f88fd2b9c5da0e9ad6905f614b70160c8127e9616c8940227553090813924df7d3271de5f68f2d7c717839c4fb12482ecbbab30591ded5cc8bf2a7681aca0842896d4d776bdb65abd8bb11a19f98ce4247c2434cb6e824413dc7726e93009b5fed9e027a20c74c2630496796d2577d77bf4e0f96eef1cfa04eae42d61df3b6a703c7f23942239b47889f3015c9576ccbdf9ea19ff28060cf6b4ef8b4c7640485b373f15206a2ce56a554bd4d026897e13e407ab081efe8b5e0a9fbae09dd016e6993ac491c14d4d178949fe60e5915f0e2eabc9778f9b0bcf61185a3a3e514c01c621d5166e67c60989ea6927c92ac0fe8d6e77c87d294aa83d1a44e31f2827917bd2d97a952336ebcb778a1fcd9a3c7ff3eec0e4ff067956fd10363173845f3eccdc4b194694537a1e31814d24e32032929db9176aa5503809b3ad2f80817e34e611c51f627517b7dc7fa1cc028db09835aba41e1d8ecc2c1a6175c6e83dfa079e11fc79499451fd1b375a1cdeb9f941d0c9b8ed4f822ec1e83f4bf29
59edccd89513a85b3930d78659fc061b760fc89af202ec7d92c3159b488c327ad21e4628c796880b4cbeb4afc530293747f3b9dc16d6bae953e018e5f981f6f87c787325c0f03d5cd8d65ab12b06e834547c95904646f35392bb1e2624d09b6986632de18a40611d58e6c51f160cc23079fad1323a319249d659a8a8f53ade5e423440dfa6cf93bc0292c5f948bc4e54a4775545262cc001df90b59d56c3dfc9990c874a719602ee9a8563ff6dabb07f79d3f25514c709f5cf3afc1e1f4054b2433c7af38ad4a621ea70935185343af3fdc742a2a93c0933e5eeac9a79771ee77c10d49956137f1cef73c353200762cc858f61c763428b51d5bbbb72dabc88331be273bb3af1921e027493645848f0d17287b9868b9aa5873105bcc92dc116d124b89836e8696dc7e23d9bd0ade92cca481466096c134c712f8dfc7b2233c42ad2a6e058df90f782367910624d8f544e75f338c23557df2e2b792409f40124dceafb805d82808817eeda43bd57399af2d65242cb822932e17d27521417cad42735feae44248c3734182cfefd65433d47f524728159575182a5b7726ff5f7c62b5513e0e9f27048d5d6b108493c3dd138de89ee7207090efb9f567c7485104f969339ef5472652eef7230d39bc4b2bce65e522451da075b79b695f365f5d4652d7677dfffcf40e55b6653ab63131b74f78a441012856563708385e39457841f9b12787c2e8c14d6f5960c5c9f8bef8ed7651f0c9c97cc673c7306f52f9f2b40f9e855d51aa3dd91ccd8ac8383c44a813eac2d3cf3f0294a46b4a5ff42e70958afaae88e472934a7e6b74e5eb0f1c301afca66c4ecf46e65efa4229894570cb9ad3b42cccdd6ddb2185f03ff7d05fb1d9bebb396725f6ffe1bda03758c4a89b46c101d7a09620b347ea032d7184441021c1aeedbc41ade758a143012f77f5c3cb9bf6949337197b3295900699d76de2b1aa7eaa4272fe5c3db90f5d82d1ad7a6579c0c6a78b2692767e2657592814741c890646c2a8605af04481bd8c8d5a311ff1a9f31cb206dc45e0b7c8e3b2244280aa4cbbc052c9042c5ec1bf00127f2e40e04128f82a98854fef5e68550400703986f05ce90f6950aa62f922a7cdfdfa1d214e01297713eee52ac07577b63d91f3ec02bf87e56ac1076bb8547fcfb", @generic="4f80c693066f453aba77f81f494ad5de3f0bb0f189b5857ca173c1396c7af61d923d22ffde868c67643887f0dc47dd2b34f4c858687b65e31e1bcd2d3f1bcc415384e97edbcd36e05976d422767654fd817626e71daf7d640b9caef94c096bd4c3fb47ba551261ab0425935f87d3aab2acf82c7e8a50c6dea37e095e9f9aba1616274826a8b8fdb1b6d4b33066b4ca7daadfd8eda2e44aa58e347a8d5d556f76c73ea890b6bd39c83147183d0c13ea06238c218b2271f069167eeb8433e86f7d1d84b4ae7575651977f5642c3ef92bc398f284978cb883f455725a0a618f9e96d25f5a41f624857dd6032eef2f9bc600243931ed9ecdb7a4e79f6c4d4fc9", @generic="a02ac7bf3c0876ca66e95a176fdabb425f934cf86ae0c9c09d0c4fa369e6e856bb3a3c0660aa1b88fe124c28fb525b3c9edab9cb45c2ff6d5a68261c1d41c6810a0f6b35f9483d90a19e9177c05a796f9077daa82514eb8558b8bc2ccf26ffe610be501bc49535b36a4ca0bc4db8363b87e73b28ba0106ce9cb9ea6b4080e5f479c6025c3d6925e2a2fb0f5abc35a6c9ac9aa896a6d4d7b52c61feb91b21da9a25e93bf9d763ebd8654cbecf5f728965a303c36f2c", @generic="433e979948cdaa706dc09323d59b496a801c61625f031768c2ace4943850f60f4f227e04e551c7d0036b3600b31f6659ca04dc52a2582efb3f89e046a628ce98aecc0492409ae0a05604515a622bc32076db8e9178d4ab6171a556570202f592ba67104cfc7edecdd2e13cd44d68d6af45ee4c57a70155ed244b30d21006daaeb37bf12cc9cd663b4f354cf7b9976e06c5d5cc003b5b01f179896618467003c3d89e9a74a8beaafa35247e49ed60cf627e77020602ad650b96024705a04137a874a3b81ab26b4a735d86c6c2b522663e558d9632496cb5dabcd12e7ec1218840afcf3a2b27fd892ab32b7575cc862d4e8a4fd7740e18a351", @generic="ea763b6f8786", @typed={0x8, 0x76, 0x0, 0x0, @u32=0x5}, @typed={0x8, 0x22, 0x0, 0x0, @u32=0x3}, @generic="1a3fb0e9549d92ea07cac37f8933ce63fba6a8f00d1b4b59ad9ad3586232c1181f6da4d0409d7f6e5a15b0ecd8177c47be6dc1e226c34acc42410cfa0e7701f5f23015201bccbd1c79eaf6e7a60977c016d1526a6df45841bd480a4f4fdbf53dd4b447654255578ddddedca2aa8df2c9f29f6d3cd3"]}, @typed={0x14, 0x7, 0x0, 0x0, @ipv6=@mcast1}, @nested={0x202c, 0x4, 0x0, 0x1, [@typed={0xd, 0x69, 0x0, 0x0, @str='$]!:[\'+}\x00'}, 
@typed={0x1004, 0x26, 0x0, 0x0, @binary="10eec0ea280627c6ffb129df4047fb85a06c1aa28dd39b8ab80c8507467ba79a0dfeb30f1127ca80abefd9d4261be680716fbc6ce88613d7d7a6e82c760f06eb29eef3689f48874d0f96e1c382397732c33fc6d9597ffa7169a0332bc139a455a9fd208fe3e9e57f363113e79228c1a44a7ec27168406ce5cd679d53488620e427b72b2e5405fc5f9336c25d6e5c6d7b195eb4dcfab5541cb32cc80d75eb8c5a3f90193c886a2114be9a884de6a1f2332439133e27ace87e26268caae035ea27d36948a3bf973645727ef5e902a49d69b331824773994ca5164ff4d1bc7fb3872d31d4ebca576a500066655bbb25c36d42d8286c76a619af8b6a7cb0acec9481d34af71e9a0e0ba3f3bc5595737ce40f7501674950414d98cac28ac879b5efba2f9aa23e20d7c1fb591b8afd094d7d54001895b55716c80bb753637c8d41ce51cae5cd4332ab3c353e16fb75c3a85872a104abc4961e0aa8d54f46b1feae6075bceef2f24fce4c809eb82a82033547bdf8fbbe2c96ce5f20eebeb772805a64f6d9cf2b20c93e11453140f34119b4ca1a5ec954177838efb19e5baaa1196a7ae395580f9d3097483f237eef4896a9baef56e539094aa4b67bc1596ee1af18630b5392dde47ec52b56d83bd77982f7b231e6412a2bca83e10287b48aeff87aa5935be347a3633f56681e0046dc78c6f7739efe833e59bfafab0e4871b5aa971a0142f71f8eafcbfda78c69461f5b39df5e7372b733f2b3bd6922022f68bb0884b51f171ea992f3630bd2346fbd7e830453bb3928255b0901781cc68a5819753a6858215d68bd00c684d5ec0e244741aeddf9b433e4953e2ff1c5febc781f761b686061efd4cbbee40a5aff2f87f8f93301e2d79080c5eb59048d23bf70fdaad055bbab0acf53614e0b0fc792de8e66dad665963c13a7dd16e8eb1a6d69501fa0f4095132f460ff85fe47801e2ddb919d3e225c32681c2a08b1df57aab0813bfe63ed76751df32f23ecacd541010a71bcae8916034524ebabf9bd768f1fffa2dcd9e9116f7f30c7e275394911f94c65f5bff10fbf4159b85c0994e70ebbc704213704b73a0ca97ad36e46969558579dcb3f3a5635473b974924cd191327544561b2c7bc1577ca29f930e717eb320c597237e7a25e9142de98795ac7052c3d310b32849024f37cc55a01df8221457edce37c40818ed40ed7af52ee529dbb11dcc28127f1d2cef3a1e223ad1b9430f50386906ac8ab3f161856afcce4c7a93ce8b3639c8515b05786e6b21f1720c9bd7bffd16d506062e51c6c055f73441f205db8a5bc33da6fbfe31254ffe7903011a99141f8b516c9126aa75b4e8d311cdb3b06592c154e823768aba66909d9ade6c9f8f65eaba20beb534f12aa6448c15b6e67967db437cc843630dd3ab0b7a8d3c9bd3ba33e29e7b71f7881d1e3eb2b63c6488a1701adf0d9a1f78087832fa56482f4771d3ec185b15ff595c8b5774570baa81ed1173ab40d66ed225fa492951f067747f3f2e8e0faac2b80e47207e3acb2ecce1a2f46c2d19847b77aa0a37ef235a4a93008ce7db32c71684a5037ccdc99dd257360103d9f0c7e3cbb3571f9f4f03a36f3df24c582b10a0425dc834f3879b9a5a789713d32792329e33ba1452c413b6f9cc5aa71ba59ff0379a9703a3ef8e357f2cbb91e63aca042f26d988c60ce3ab259ef1770adefd5d852bc5fafa72d12f2a3e889e188f774015a7d142a150f3de931bc95146d906c38ca297ad758b71eb4a1697af66c3a5e5fd93c43d6273645cd2cbac8dfa99a1f1cf7a71a07c874bdc68dd63289e63d1ec001ef10c6bc01046be74a69c91ae153d6d73234b1e062e5e1ee925bb4b4d44ca26a09b681b62d683135d45d5f9fb860ab98eec49138fbe06059627218a7dd2264caf66b5349a271dededa71a07a2da2dc9e885c6b82146ca9616b1478f8879bb7fee0a88e69a7d8177ba9a84bd4e9304c56ce5cc63e55a302f30957efef7be1f21c110f8fa9abb1a148061ab31ccbf37ef0e967de773873c0ffc1b92736145ade859aeeee16217424d9809fc8f613fe8e949028a968234f62a32cbe0a118f1282f3e6ec54b32dbe7cba4e279ff9dfa641e622265112c972da6e466dec9a53428b50ebf2385357888c0502cc6012d93fbdd31e5c6334e0dc0100062987c3bcee1e0387d82cb159b5b67ba39f45f38e996c4e330beb02c35b318035cca031c688bde0b1df1a7ad959448c909e470703e7f706b2ca11f01ec4e6e56da1e195cf959a5c4f97985bf536839e824a2938205ef348b521cf1a922d89df8bca4a4172ae2a5fce2dc1ed778b89e722f0c5d8ee1c636c7625194911c6a92fb55db5b2c20b90f1e8647bcdb69d96f4dbea519c1afbc56424f4bc429579ccd08fc9ec4c64826d1f93675a131ce2a3c8ce3f6c54a87ca52ebc66cb87f347e884e9bd3fd57550bac306891d9aa35a4342c8f240bb1c94385f763624ce
6cd365062c8b5a191d088a7d876853924d380fa019b827459957bc35c025313036af7b58be212f0db31c322b6baabc316e9335f11776d776989540da63367f59c34c589e0584283c6890f4ea635d21fa3fac8ced154939309dff53e65b8838d2998efaae07870b816663c1344989f24835ebf0a0ae4c4b7cb4eb8d4276575f0c19bcc359d10b316757e060e2587bddd01ffd14d44854e52b9549855c94800e24d3587a1b43d7a49a2590876c23f8dc47b243fc7d8d14fc6e126da37fb5799c76ad17f6f966f4922ebe7fc5bcd0783e5eb37458a6412f4319ee4021f67bffea6e80c5430fdd4638f4ef5f7b86d74bd0735f0bc2912d34922806c1a68820cdc8733da54d4d99b7e1bb9d0eb1496e06dfd07f0e1b6c21e0a8250c9bf873e566df8b050d37d15941ef3ca078dc5623af3b53a55b88fcf88617430c41b1c2541413df81d13662af228866891a8827c6b7e9ac174007a7c6a57e07e3a9761611b470ea6d1a1bd35b02e48d73294f3ed994db905a2593fd7b689ae5c12437df90adbdd32437ebf7ca350f076ad2ef141aa308b9c1d03866f916ffbf00c42e93ee437da12c507e4aa0e986abb45229363caefbe87d18e50aba8bdfa8811c973a3634e9fa0b9828691e6faacc03accaf3174dd592ad31b57944902db28db2c77bdd9f92d08c3d53e3718c70b5c718a0f0243ced61fa09ad6500a21e261152ccf2acf195e13a093b78efe2e7543496b28eed9b9b7e10d49a2937eb53fcb66373d53e2b4875494bdfaa350d2c0ca87aba8cdb3f7afc6da9d60610c69597b946a799bdef362f9a8ee071ba375c8c93c605b84b71aeb639c513ff55efdea4fbc179ff5dea85567f7d46333320be9c0f6d27fe7e6547be227cba358b83487ee42f5e10640ac4123fd67d511eab764d01f34fa28a4dde2c665a28036ea1aca79cabac524b99fccb051a6f3eeb21536850f4ae91f10f3ebd4eb83e9494da1924ab073985bf7480947677c6ed92c60d450c4351e1c3287d1fe5fc39d7b569eb5eb890ef414b765aee4f718b878475f88592ed9520f6bb70b2cea1119c3856b806ff4d7434713360c0ba7425f13da43d43b798bfefeb883947243686c98deb50f9c2d71667088912499507cd011ec91a127c7c1f434e7d89fce9a2f220d979271b4cc971368bc5aefac1b1b8b34c48ba85f634cc7ee48d63963b5d200da64d60624e394f258514e4ec5c8712adb637aa7bb3ad0be8053dbb05649bea6714db46cb3807e10c655ef14ccb5a4d14767abfe9f6c18682bf9f5d6a6ec8d375d836ee828fb3b6a3b79d2d60fd9dbdd826f60835c225d272093546140c0dc135e369293612c70bc04661f9abd7698b999779688d4dfa4612199b05dbba097542552dabcf694322e18e81fba156d1f4030dc5715530a7a509e454301cbe87d7859d8435f849c1818363745c9c034cebb285be026c582884b0e36c1bddd99f7fe07c1c4dbf604fd3f624d48345e04d9fa53837b854a68dcb2db5dd1cbf7a4b8011924c7ca7c444f7fce3e04d16393d8563ad4a2fef34ecba95727927a9046f17b8566b6fcbd8c32779707a6f9d9f76d50fef8da194f800c73cc12092f5c0f7b14a980a3361de26834a134eb2055043461f7441e8ee45f394bd4be05a5ff5a2605dda0c11782f9d5a4372f6cff7f84e3a8dad34c28148946b021466546725119c12c7049b03f73c24305d8f8bc5925240f456a20538a439c68a258d1f1c27929f161ddff74cbdac0aaa1cdeae45b2385b93edcb7aadc2b9e5ac2f6dad62fc7ed562a4b96d0e01ddf9f1a3e11f36362ec5c3a7f0ec85edf7527293a231aa5441e1808c9fe2ad7492ae432564019255a08c12a68a615a02b130971ea1f369421925550f239570e595ce16a40fcc6b3006d205868ad10e955a64eeb854931cb74bd9bdb965502e3ca9212bc9d339e11e2960545d94af276497b21202a9c194d3263f62fb4087646b0f68cfc6d36928fd8df8d014b21cb0defe6ad1c79d4a9ba7426420999b5c35b8b6292082fada1c10ae55ada6959208adfc1c3fd1d58c0a54185be1da0d9610ba98baf016f952b0ee38be534972804eb003252a6d1894f0617cb955d09c6d3d4086452be816daeb7775a61690dcc8ee0e432fca4a7a481c0d85ad34cadb4e0960795c9d4a5ccdabffb957fc59acef2f34d4500326e9f52b43f863a92cd027fe4df8b899d48c4d0914e0db78f9f42d30a7bba5c7de2a3fbb9bcae2e23f5159cb9d2593bf181a434a3b361d321a8079c6178b9fb26ad42b8bf2088b708abc831acf16ddc86214f09668a3fd5f7cbbd1a957c05a0497e4b113809e09dd70ab530f76378dd7e785eea1e172cc789ab3942df1373d1dade393e4747e9f8a1c905ae1f4d5fa4eb215b484d82ca5116bc88a9122724c9c99aaa3df8c2767ab1816606bcbd4758cb940722c812a274a6e7ca18f1182d57b11a9ef12a8a083575d7dd45f75334e3978201de534a79c2e96b88ab330d443910b4376d08dd
7d38122a255f88b6887a1c01a71406a47135b688d5c979ecc7b71039ec3f3bc9ccc1e00f4b0e76c5805ad93b2af887e84edb94b3209cb381382de26f3a9759cb3f7f857f141eaddb266c2a58c6ff2f3f65ecfaeb9ff1b8483d1c22cc67d3fc91199c1ec878f7aa3117c42938a0b62b9b8cb3360764170122288706a94bf45d849248aff5e69463c05037f68987606b30db0af67d20556c1a7ceac79daee31bb07c7db9b2f2379c3c04de12d794ec3ea47832615ee9fef3b58c4b3a245316ccfb2ea5f76bcf6c73a439d156fd38bc108ec7cc53fc83dc5037ee777367ad54553c18c228af6e10d9f79519b4f7ad2eca0a070980a7a2db25de00516d6c3bbc52898c835c9f3ee42695af5c68be660867ee3857a16b5aa02f5a7a759764d24c658c528ee68ff9d172f58bc926b554fb22c7bef1691814361eb918373c628a3100437d9be56b97049527d3f6bf500df20ea42c988d420df4f2e0c7cb03ceda87d78a12a28aafcc36095e1201c1efa717cf8200df26edcb6c34ff315a32b89b64e5b1d61f80388758f68cabb5dfdb89dd426b3c11d7f2d8f1101e1cde2aaa85412d195efef01c09da37b5466e7c7bc242967d839e15c15f63b046273b8b6590541cc76b70ee298086e06192126c893dc9a0fcd12661b48654e88944735fa21e60b5f12caec7093d7b4fd9155cefd251cb7b81f3926d61f23d344dc97ff7ee25f466a402b8d28655b2288582eb72c8b4794f1ceaa33fd548410b5b6f68e702fc5ecaee68e9d90772076d76a34609ae87aa6f07f10"}, @typed={0x1004, 0x4, 0x0, 0x0, @binary="4c9365e1b99539cafc93e40dd7b8a0656f77e06ca01e950e4aae6bd06df7bdc3ec2a2a196ab8b5ad3dcaae2d13c58cfc93b60793612c105223bbe2ed2d18c43e0ee71fc3231c54a1a855da1134d249d7ad2a0c54ada2eba9e151a62d3a871dab190fc0186cc367a4726f1a9a470930649ea901dc22264d1793ad45fced3a009a2569eb90994d81a40743c1bcbefa98a9188524091b918f898cba87d9b4e4345712c5848ea2538c14b0ce7028991910c231bf76e9995bcb3ecea9eb56d7be7ffbd556fa99e79392bdc579cda64a8dee66417bc754977a4c0bd1fc5df71747db2d21eff41cbe5352cb6661fbf1ced92aabce65ce3c23f4bee26adb6f2ecd687ca29b8d4d172e28eba023490e460898c447ddc6c1173e999a4fa1bf4fa9357080d62f626523d6fa135c9a310ff24f507c37daa2d42c0e125f0e248aef7486d8ee7b6593541ec2bae9a3bd790b9419a808dad08f2b80aeb49fc768d0f9eb166cac072d669a56a62be1569ee230c6187d51a5803aee1046794018e060189813e934590f9a6d6271034769358d1ec1f40d48f309011faedf8d6224cf90f91017db5c91ada0fab472e64cec8744585cff67a651d64054959084206ebe582283f92e0900c4f25d1964edc2b3058bce15bdb0f76158eb4c359d49064b741e7e0f72c044fc49d984365bb09c73d365d8f806e45a4248695543a4529b88c587470750e0a3171c613d823ab44a8ddf8ff826e6eda1a378bab1205c0bc0ea1f06a854b048c451704222e3a097c8517c2c07e39b031bb4d484e675d88cf1a1cdc9f6fba035de1ac851c24a047946c0a80d04966406e886e66f2203b8c78fb8de17e103e7c97b546e3570e415e87f9b5b4cddd0a3b7d21b7e0ed23b743c392c0509b39dbed6413ecfaa5358c8fc267de38e8e2b5d9fe5c80f162ece8f82566b24187b4bdab94c56db28c542cb342f0b7aaaa22ee360b209bcbaf4a8565d8f2e3d2d7293e41c13bf619658c159f82893ded9f36a8c2a93a7a9c5ce7f40b20455604b87bc50be4b1dca437be6831fee0d8e35da3c090b6a5d816ee29fc7ea3a282d5b3e8ddc3a3b6c07cdf8f79630664b5736afe12161a3d5d6924bd7de05f21937cbe5d0e466e977fa725398b3efc88564ecacf2e29eaf05cd3897b95eeca39a4ed9231e2b77313ca3c9a0a5c82b575703c8bac5d4cbe1ad80968ee63fc35bb19c183bd1fee1a883143387512ca367b301ec5ee99bac11591b2f4daf7eb038205baaf61341248d017ee332a9368898d45d14fb02ed3a0e1be0a4d7616b280b4b08d30314034c1b4a5b6335f189792514e430ad94f4e3941ce507a874f76afc97db7395b89a8f67e07057109ad338b42d116ef48a1035c193fa4fa79e3a991008f3f6710753c083a24116284083881b3ae37e0d7cca98b2541feaf25b74e643be9e8846f218659c17fd7b22558614d6c16c86b73df4828c11ac5c62887bb79f25a479b9a90f2a6167c734acb0c4a0b9f8a405e1a380dcc8416246a8f1e2ba3a3da4e2fb1fb3279af8de526ff269fa5f77ff27d22a0285b979a6f90c45f864b31cdf4bd00ba577e2fe0fbe18c89b89503b40943e22bfa085213ec554e35cec8e87b9a9410e315ffd819b3d13ed474601b44d628d91175cd9618665580a35272a659d540f9c38cc55d66ca3efdbdbb2e9b4580c
6a671aecd202ba7e66edb81931042cc533955e7037035256ebbe5c760657e4b944396f6cabe6ef439285d70d65ba36c40bb2605174913e4da8709b6117f4978221cea7398892b870e5b7c3daf340535ecae73617ee8d7ce42dbd90a3f1be6840b62952cb6e58d1f22479b2154213efde199b5a0742eda8051dbb81a34b7dfe98e445d3a2d10a2d96b3ed62c9f619432bc13161cbf99384de2d2b10d0c8c8737f1f052a1d841b67c1f136dca3228dd23eb4ae653df2c1469ef0a3755eb00a538e2cf16c484cb9915a95f7ce8ea2870726642289a8187f93915e08d22572f373659c23ade065cbba32f771daeae1508e7180b90848997d2a260a21368d7f2d2e5819e4671a7ad66dfb892e9f376386cc94de6849c36fe63641d0939120cc32f18aafce416fc95333a0066a6e94976201dacd855efc6e29c921ecc90d6fe1c3cf32c6f9ed6f6c543616af1398a6ac1fc79a96241ac284e4f6ee616f30e8eed87000f3bfcbca33d495255925288c3bcd31fef951b9baacdeafe18df7b267425481e3692ad11c9b3c3d50a3585591ac3f736f9f38065645132155d637757d04da9850a88cc91420197e773aa111548f6750b971386ac73481978916784bb2658f148a7ca831f24f9f1da9276c5a158454840c373c1bceb013ed3d0a5a978786478de8c9e5ff51e97bc63c8341c30c6217d56f844f4a48d9431d2925606b3a61e077609c823ec1c13f76366b0285eaa95e79ff63bf5b4a18899486c325b8ef72a31a0ab69d09cee4adac8a242c7f20a785dc3299e12e3ef16591468e4db17049e5c48f6be4e2375ef48f222b21f037d57f18dfceffc64245d9f825f18ac5825a042c3102b110ecc43e625cdb198861ec1a9da495512f03ce862b80e4b59b2fe32086763868be64a09b2142676405182179188d95ca7b45add98ae5c03fb96c8643c1943e46718fe9fd83060c991168be57386338dea3b8a29dfeab35ca450312f7a807e2c303d91f0674bb03c98d8ec6546d2e25d6b073e0f7a43b65a80fa59282ec69520bdeedd8ddaaa12a6c248ea0ca724f1a2bb1f9e56e59f36794128bd5d596f575af6ccf6340050f6001048dc8341f2b0f01c0308308de66d9e16136f4fdeda5f1d008a803a9e32a7d92b35b777aca7f2d4046214e783f0a92f752f2131beb785646c19309ac9888b7683892c0d7564548742c7cb4881b994462ae706503d732c335194019b3d4815490c1290826990a9a4b7580d3fa8032f06d3a5494dea54d73d6c1f956012631eb524a3391e4fa54fc96652c8e6abc0e0b75d3e6dbf0abde878339e79ae4ef39f959e5658c4dde58b95908f673dfebe5763d8200402f582c2203a9d68c37c257d05d5781d044f6a1589b7eb0b7cfe801b3b531d0e24838d758f7f34baf1d474893ba3abce406f28ca159612400fa3380d99fea2968391b1adbe43fd11263e5949579e9a1f3268f072ddf76a151fbca1c87abb46a1ddcbe9bd440f40fd0a0b206d334f6e68a790924e10c9eb8bcf1e7ee8415193aa2046293652c91309c14b4c34c5504b509e98b70c7a18e9bd727d5bde923a39a733838f2c88636eebb2bbe879cca4c58cede033bf11f7f31caad3998a70d8034cd233150a8a41a5a2e7f84a55f6f8de1da70103e2ea115b35ee848b47f9cc3931855736638decec3211d8ce82ffcaca966f3d07ade4ec67fc19209e1c4bedb406ab87fe009ca46cd35d63cc7fefe9e6f1bded3ed7d5206775db8b8a60d6c8c3aa157ba2e0a17001e4bd9124f65b8cd5249fad6cea1d33da124390307874c27c0ffa69b30d213e84b2284013f1aea08a980a19c53f97e9ce16e100e1b5fda7cfb999b2e71cb31b896a7c5b7cee6143a7bbe8a0be47284b3d40774dc17afe3da896977e25b30c0fa77966692115ba8a3ba0415547dbe1625769ef6071c575ee1da8ba0de001f3470f2dcdd3636bfeeb16bce926347f488b7b9f79a919868fe004155bb84f40cfac783daaed7b62a5a68f8ae97a35f90eec668d36e8a179cb262a1fcc6e86785f0351f5ff671ff1db82e846b6f9abbdb655a824c3cb7d04ade6c12ac3d2593b0f78fdf3296145e7286e8e46603c31a2b7f4b19c7b5092c353953324dc02b47c33f4c5d7984e9a3f957a01e1f9cabb6c470438e8807e71dc4399fa943f5d95e51dabbd7851473fc616db2b40eddfe5489db095c2b0d69777d9ceb49d9d9cdbe57a25529193168df79b65c4d3f8f3c860d49e170658fc66b8317e51cc4f76163db65cf63f24ebc6fd2ea9fbe8f5b0384b591e4fb72c9e76bb146d10cf31f302f61fee838d9848b9d9640ffd4001aa70bdf96d5c1eea7fabde27358ce478d2d3887e998875aa9a065cc5fe48aba5fe06501abb28374c19c5ccb44940afd112a088c211e637d8731d66ca0676c8a8836b008148b0d4635b35a23c052a2bda3626342115316ad0ac3539a96373bf1b07917b22f28598c6b2ebb6bf3bbb50898536bbf46d97a984c5a689cc21a2ea35eff5ae8d
9f3aa45c139693fbdebb9739203e394923365ab15f0171a71e2059d1470b7ac175a7c698ef538bb3cad1c6c8ebd081fcfa83e3267d568761c0c0627adc64b115e19b2e1d1d8754c05f0fda4fabd479d0e672c4977df2b24516a2f179ac503371806f49df55644bcf9aa9e652a8cd6979ddd067f73090156baef52ad8f7329adfde05748e89fea77133c98702e8c733d982c7d86a575e0d21cef175853719fbc3bbd9ffb36c15048a6e84d029dbb1026fb2349e907d3f8056fff991f518ee91fb198bd5a85a489344e7bf20c698856e4790768ea7c6c7348d42c213421eb57ebf9ca4ab03fa32c529bd78cf775a4851179b3030764225915d3eb6a4cabf7e35a85aeace82263f71aff6417f48bc5844a1ef853a1b6c156216e4257280e4ede49ef1580a262e63af587848f43c3a720ae3bf68c127ded68bea321142030a9e9876be970f43b940aa1c94f59c653f48a8b5dbc9337cb74d1cdec240f9a4fb082b7e19cf0343195a364c3fbae25f0f816ddf2f08b0911f2b7b16faeb97c637f15b2a1f90814e9ea869e4103c92b7b4ecbcb0326f989b1c818a1d4607c28adb599bc1e08920c4c11f9f8b739b30ebab64d048c9fb0eeb8a6933ae76577a9062170a20af69512d173853ed77bf2b95081f0e808ea7241e5223aca34237b0cb47568ecba3af50fe53a4ee6a1a274ae62597240838125f451a56f8b372692ecd7dba531c4fd28c5a4c1ed9eaadee87b77b4830579e82c341af9d098826bef9f2b3cce99d62b3e885511e4177411fffa8b7191f09c06c38b66b681d12dc2753fbdda5a50d79483f84139585b795829ac749b682f61e582bc4fa431cdbcf7f2b7c6aedd637e0802547035b48fb9a597a29d334515eb4748062449b01546c998c295a8a52e376f591afe49adf45e8b1a473a6b9fe55be02c70ea5161a2ab472b76f2c6602ae5b508ca6126141e92d10476e5f453a8244b520aeb78354a62e7504ed3bdee660b813ca644fd638de486caa135ddb90d7f1176081bd2ca3f2e2929822d792eedc860335be1d09a4e07cd93aa9827b2001938c19b84c50d6bd6b8fbc71f0216aefe227d5bb5a1ed167485282fa7e801a1540de72f989cc65d9d44fe3a0a61d3359e3642b1d9a42d1e1375785828812147a03e16bbf5c85ab943954520e5e25d8abbe7b6032e92091ca8b9b9ebe343ecd403da9ce81b9b4f2c47e3a5d4a458f6edde0ecea41f46bf3032d3ec7e7d4af8fb5cd1a9b916c65723d110cea6f17c7c352a6be9abec5f3f3799d50d5b69b556e024c482d7076708501672ac4bf024f30acfcbd46804f5f32af2040e7f6a7d52bde679edd33f33f30042a16c542768ac3545158084a08fddf846100c4047ea9be156968a4e98fcafd23b9e032e204b905b0788265ff3a2710b42f755a4d1f8453214c2e9e423f9716eae83f5406032e4d008ab440eac7ebec5a489c6b67d6d07a24dae11360248dcd47beb2e2f51178a0c98e920b8a31edb9dd890fd4ebbfa49dc1f117828d3146f62428e812dcd906edc82964efbf644aefb5bb743a7795822e10be0db08a89e1e41423ba591c68974b0d12b9f40a9"}, @typed={0x8, 0x13, 0x0, 0x0, @fd=r4}, @typed={0x8, 0x68, 0x0, 0x0, @pid}]}]}, 0x3650}], 0x5, &(0x7f0000007840)=[@rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, r4, 0xffffffffffffffff]}}, @rights={{0x28, 0x1, 0x1, [r8, r9, 0xffffffffffffffff, r0, r5, r3]}}], 0x48, 0x8080}, 0x2000880d) [ 1984.644945][T28532] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 1984.658955][T28532] workqueue: Failed to create a rescuer kthread for wq "bond532": -EINTR [ 1984.764605][T28534] bond976: (slave bridge934): making interface the new active one 01:55:34 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xe602, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1984.806011][T28534] bridge934: entered promiscuous mode [ 1984.820480][T28534] bond976: (slave bridge934): Enslaving as an active interface with an up link 01:55:34 executing program 1: socket$inet6_tcp(0xa, 0x1, 0x0) (async) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) accept4(r1, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r1, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) setsockopt$inet6_tcp_int(r1, 0x6, 0x19, &(0x7f0000000280)=0x3f, 0x4) r2 = socket$inet6(0xa, 0x3, 0x20) sendto$inet6(r2, &(0x7f00000001c0)="73fe3a96339fd1ab0b0212f99a46a20dc4e309aa2fec8573556b94f492bac75bed55bfaca36cf0fbe00c0a83da6ff91584ed0e5a9d093a566741a5ecdb51759c4ddf43b0f27ecedd7ee398e55de82ed24c8ea6735b2ae9dccb8fa82a088e74", 0x5f, 0x50, 0x0, 0x0) accept4(r0, 0x0, 0x0, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket$netlink(0x10, 0x3, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(0xffffffffffffffff, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(0xffffffffffffffff, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(0xffffffffffffffff, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r4, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r3, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) (async) sendmsg$nl_route(r3, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) pipe(&(0x7f0000000100)={0xffffffffffffffff}) 
ioctl$BTRFS_IOC_TREE_SEARCH_V2(r4, 0xc0709411, &(0x7f0000000140)={{0x0, 0x7ff, 0x7, 0x8, 0x40000000, 0xffff, 0x7, 0x0, 0x4, 0x1, 0xffffffff, 0x9, 0x8000, 0x7, 0x1}, 0x8, [0x0]}) socket$inet6_sctp(0xa, 0x5, 0x84) (async) socket$inet6_sctp(0xa, 0x5, 0x84) ioctl$BTRFS_IOC_INO_LOOKUP(r6, 0xd0009412, &(0x7f00000006c0)={r7, 0x4}) (async) ioctl$BTRFS_IOC_INO_LOOKUP(r6, 0xd0009412, &(0x7f00000006c0)={r7, 0x4}) [ 1984.885591][T28543] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1984.985092][T28543] bond924: entered promiscuous mode [ 1984.990585][T28543] 8021q: adding VLAN 0 to HW filter on device bond924 01:55:34 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1a2, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1985.058186][T28545] bond924: (slave bridge891): making interface the new active one [ 1985.066127][T28545] bridge891: entered promiscuous mode [ 1985.075618][T28545] bond924: (slave bridge891): Enslaving as an active interface with an up link [ 1985.097099][T28549] bond365 (uninitialized): Released all slaves [ 1985.188688][T28551] bond1029: entered promiscuous mode [ 1985.199221][T28551] 8021q: adding VLAN 0 to HW filter on device bond1029 01:55:34 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, 0x0, &(0x7f0000000080)) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8}]}, 0x4c}}, 0x0) 01:55:35 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x2000, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1985.337195][T28554] bond1029: (slave bridge993): making interface the new active one [ 1985.345404][T28554] bridge993: entered promiscuous mode [ 1985.355172][T28554] bond1029: (slave bridge993): Enslaving as an active interface with an up link 01:55:35 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='blkio.bfq.time_recursive\x00', 0x275a, 0x0) ioctl$FS_IOC_READ_VERITY_METADATA(r0, 0xc0286687, &(0x7f0000000000)={0x2, 0x8000, 0xdd, &(0x7f0000000100)=""/221}) r1 = accept4$nfc_llcp(r0, &(0x7f0000000200), &(0x7f0000000080)=0x60, 0x0) (async) r2 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r2, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async, rerun: 64) listen(r3, 0x0) (async, rerun: 64) r4 = accept4(r3, 0x0, 0x0, 0x0) (async) ioctl$sock_inet_SIOCSIFFLAGS(r3, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) bind$inet6(r3, &(0x7f0000000340)={0xa, 0x4e20, 0x800002, @loopback, 0x1}, 0x1c) (async, rerun: 32) listen(r2, 0x0) (rerun: 32) r5 = accept4(r2, 0x0, 0x0, 0x0) (async) ioctl$sock_inet_SIOCSIFFLAGS(r2, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async, rerun: 32) ppoll(&(0x7f0000000280)=[{0xffffffffffffffff, 0x4}, {r0}, {r1, 0x5064}, {r0, 0x10}, {r2}, {r0, 0xa000}], 0x6, &(0x7f00000002c0)={0x77359400}, &(0x7f0000000300)={[0x4]}, 0x8) (rerun: 32) write$binfmt_script(r0, &(0x7f0000000580)=ANY=[@ANYBLOB="2398769586de1813916dd5808c2520a967acf8ab1d04cf15fdac9483a5f76a4b263c2f4ce6280a3c2e300a00092a7c2ccb42c175adf29c0000000099951186e7e151e101915a0871d2d8482174ac19c11c1689efdac0dcb04db268af227fba965df4342c888f92389873e846401662af37f6edbc380b84f404d61fca1d0f2f4325867f7a4d998b52040887ffbab2b1fc4df5d3593d791a8d6b0521d97102c33a4484927d3e7af3c7851531f8f92a1d5305f96240ec65577dd72d01b2f988555366b2ddb42a57e48b078e34c03094c463a603f7a28c1f58b9eb576088969b10e3113607b1dad3"], 0xb) r6 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(0xffffffffffffffff, &(0x7f00000003c0)={0xa, 0x4e22, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) listen(r6, 0x0) r7 = accept4(r6, 0x0, 0x0, 0x0) connect$unix(r7, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) sendto$inet6(r7, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) connect$inet6(r7, &(0x7f0000000380)={0xa, 0x4e20, 0x4, @local, 0xff}, 0x1c) listen(0xffffffffffffffff, 0x0) accept4(0xffffffffffffffff, 0x0, 0x0, 0x0) (async) ioctl$sock_inet_SIOCSIFFLAGS(0xffffffffffffffff, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) r8 = socket$isdn_base(0x22, 0x3, 0x0) r9 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r9, &(0x7f000000c140)={0x0, 0x0, 
&(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) sendmsg$netlink(r7, &(0x7f00000078c0)={&(0x7f0000000400)=@kern={0x10, 0x0, 0x0, 0x80000000}, 0xc, &(0x7f0000007780)=[{&(0x7f0000000500)=ANY=[@ANYBLOB="100000003200ff04000000000000df2544bb125fc6c1805c5a085a3ddf0d15928b4530b24134c6b654d676b9b40ee120e47162ccfe0aa1542b60b68cf3b504b42516e19e0882aed94e605c52cd5f8bbceefa5ac3c20bc32afb238930d6b1aa0a80d386bd358faa2408d81043c28322c9556c3734f360f547092f260e97a119fe"], 0x10}, {&(0x7f0000000440)=ANY=[@ANYBLOB="50a005e90c071c51d7b6200000001500000425bd7000fcdbe7d2eca01bd592f93e00", @ANYRES32=0x0, @ANYBLOB="520a48b39d9173d5970c8f1a70e859cbabba9a79f80f467292bdb3b797fc7b0dc2c03831306581f910ae4a5115ddf7b676b4000020fe4cc2b6a3868ec49c50d8d4a4716ccd95746fe69938e83a8bb968b884364e69ba8d608930b0aec389f9ae23efe673c0425151a3542329847e98a065c62b47f5362c1923f68a56d8f5a625544964e0dbd7747a7392a3d28cdd688f"], 0x50}, {&(0x7f0000000680)={0x3498, 0x10, 0x400, 0x70bd29, 0x25dfdbff, "", [@typed={0x8, 0x78, 0x0, 0x0, @str='*}\\\x00'}, @nested={0x22a0, 0x24, 0x0, 0x1, [@generic="c392d28a68ff95d73755d3eee054becca8d740d8eb26c2666ba30490e1704b5c5e4cf81f74fa893109ded1234be3cc913ac896cf97b6f8615afce64200391aab5b63e19c7d19d125dc0b390e2231e08f13fc9e442c6c6b225f9c3612e8625d413ed74976af71be67b9f6bb16ca5eca0fc2507a1bf10a9dec77a7c12d3b6ae0e3a04c803ffa42dc412a3611400636fcd776d6cb35201c", 
@generic="238c44509c16c3c0d7346bc923f7e05e03a5a8185bed6227823e798f3fe2710cc8c6158b2264128376ef19154fafb147ea3ecbd46e550afbc5e6f1cf15d244438ad7b86a51261d7bc7171e827cd722506ef3412cefb1c9f4ecf5d602640843e8f9bddf1894ba946e900b9f4176cd5442cb8e01d00aa44131c82daae1414c8582b16bc8dffe579452a02b57dc54ca6874a7e7ebf1c0a822c849e4257e48462f4e4c29131383a63a4b34ed4c9b3fff45818532c0bafb0ee1807f8ca7b7e06bef3a37aa77394a440054b4845b90421559867f4caa9806ace11771254509c322bfb3cf509611c99cfd01296bc580ca6e7954723e89c23ee1e5da1aca8bb401613e4a10c00d4b0d4238027fbb4b8905c529e03940a266d0af3f55f44c75d22ec33e56815379b0b34c68f933215ecff5f1fbeaaca6f136ed0fcd4fa635148d2d9db8b600f3a2b464d6d4b62d3a6ebfe668dd8cf628692c40e932ad9538ff774d2892bdc94403463f0cf73b86aaa98d99608451aab9f51049fa3c8d7f3e55f05753c50bb87f5d957b1d13687a1c1144846c9962bbc14c9c693ed74949c935f703a144391e797ad0575ba7391a85117ca1f249dc97a7c418b501928430b53d1bfb2d398526b37476f991d1735309f4a74cb7a2440e268acbf716edd7948c708153a980fc8a102e1be3630a2e10919a4092f42c9d85f29b7add5bd7c697f8027c782cc8753bb43fc3ddea3d48bbb747a485273d49841ee33f2c9d5535a1c4e7b7f42824c5174c2804c411ee179e6afbe691957c3412981dd87ac20eb8744e13696de169a3abd9e25b16d1af19cfe62f57696fc7efc840fad8b13c4caa82635b8b8fa32e00193b78054304aa0ae20001236f2e879662725c5e33a2347a7aff39e9700b6d4c18c04417755b7db27c9853a5a0089baf4a8454ccea8618d94e6ceb2716c8b0243a742b9ec386ba9e60684bb2cf26f2b32d60a56a812150932ff0877584e5da429976bf59d24a70228fcff7b3e9506f051327847e1b3bc399a0fdea35f3d23a54b0d70689228c698d02ec68c44cbb1d28e37602eec3062c5312350fe386c14278c698061f2017a60bb4e394417bbead4ea8fb6f3ad1be6abcd5636f280f756275f3fb582e842cf109bafc552acd4eaea412c2e42fb0a6295f3de373be0850484c85a9a9e77478c338233aceddd8e736dbe1359c203dfcabcae35a9a31b8a7bad8b0d68c071ae93ecc65cdec6150b9846741bba249b583599e5757d32fe6ae8508e9dc2210ab9b9b679cd9cf2eda6bf619f309f6322742aa23e79126814258bf9ba2ec38b8a2be3c4614bd2bd28ff454a74ca3a06c87f6344fd65fe099da61e3bbf830a7e576795cd63dd97e21d6083685fb8f8608530fd8ef61e5c04e06ae5c288471bd009c2ea33db5b2416fa28c35f515fc746fc8f9d5b35738e36c6690f53ca15e46594a6b3f47d924b1b0d5cf9ed91f8bb0bc21b39bd7c1f1fe8f6fb543d3021c7268cba231bec6c90abbdfb2ba709c7dc64f44710486962a7ea8d34d4a39b440ddc893bbd439f1d72405b334edce3f212387424c3f5b3126b64709a05c718f21f87a0a7a4567812bff2058f6f13ef69316b8a5701edf914d85e189757e9cb469bf665ba39af808523d64c463e397a106b49dd2ba5410b6af423d10440ea2f3d6650d03d65b7a0a17e88ff10a4787db9cc5e9f4ef69bea96f48bb76bdf46de2c5a8e47031001650d8834fc861f7462e548fda87aaf3d7d577c2ee2c43554e4540a7fc63d0c25488b58b6056a25da6acd83da886b508f1a9a1da583753b16f6a419a37f68298351ba3eab2925843229d47c8c87a9afe6f3860993595b012374fdee4830691d62ba6b8a4e7dfdfba57fb879b8da38e3cd2bbb1fffe8e19066a48a206e8dcaefe0672ffefca64f1762eafb62fa1cfc00fe66e00baaac7f5adb4664541449ee957f33ec5683105b7ef056dfe5bc93875371acbf0530f95ba6ce5394fc19ae5967ad8dce677522f3719915e9899700c8403d7dc7b1146b40daf69414bee291596f2774a848d1a24461ff0d78ff7a309afa13fd5a448a137abe2f85e67a4e0d333bcce8b95d09874fb253529850e5d67cad0576d230769fe4d6ae02251653b7622118fb0a5a59c5a225d49583c73ce3e0ef4a9e6ae37ceb9163d0093e590e067c2fc1caed574694c8bfafd130bad222e2ad16f95fb3cc51376143b53442c3e41d7713c48b6816f3ecb350ec6d6e63892de8037106706ab953c073ec8b88738bc47ccf185fc20a18ac40f1b66bc12cee2b161553f556b555a5a2d188df60f7a0c0f07e80a20b0907a65cfcd3f170162f73ac018e43d55d7e7488bf122df67266c6a33fde3d5ac33c14f2b0862ff9f060bea1c11035a8f4ba07a70cdd23c612d2ce0bdf4af0081c57ba2dbee8a64d2307554f1249d00cc97bb81b8af11d009021d4e16226ad4c4a19828ead98b74204951f2134d1b1077590df397d99fe9eb690002c2d8aaab88ccc
de1e50be7d6ee37f5ca32c73ff2e3b5e069c5501fbe4977a49a88fec1cf3d28d8019d189d59cf85d0d081a58bf791eb2741f9ddc2bbef1e542f402247184b04270bbc4290e31fb3e935ae46b30aa5aeb3931a7b4e6dd24a09543308ed9bc75d4df3e2b41705600deec43976935f5557f53c34795c01f408507ae67cf82908feae9e7cca6055be8a150a7e4049a45646418d9c1fac249b07f3cd6567d0892535b6a19f8678bd6d0bb5a83defaee3d43b4cc8b974ab69e87bb409f18f10f224f33c16a8208301dd30ee3bc72cf46aaa49ba28564234cca8701acc4730ea0532d14eab8626ae6d9a42775569e213bee3595b040d24284d0e06e0cbcbef349363a6912685f6723959f8ed03da9e54bcf13cd97d4a2656d50003cde7427d4ccc0d07fc005a9ce01661532645d95da30a7175d4bbf9fc804239a3b88c9e0c3b5c8f06e64c762362d11957e82486ae464f82495192ecde94f51e95d6fbc45eec59a7ffd110dc73b7c13b1159fdfe19b7246981738af4769aa8e5c7d0eae0e84c08c9a1e3ef3947850a24223322e97b7835ef1c74c3dad00f8c94ff618647c6f6ca149412769b63f6935c2de6740575ced735bb71bee0673f00f62952819ac3ecce855f005ce32030bd38ca2c155f30e57c487e4d6a2f3516927ce93ff624d24ca5fa25ae8af59659352e9faadae52c6d3fdd1a1ad99da806169406adab524cb8c0f1a5a9870d7024c75419f0fcdcce49c3de032002f35732d46785d9abf8899ed6816cdfb56cd58a29067558bf0cdf7d1863c889b5b06c6de9187fa13d68443268ab099ebf948570a18b8a95cbdaba1db2b59dfa7e26160c4e3a1a59aafd3159ce1b4f2b379dd4b5822d136009c30451cf0f87689661fc310138e0abe1fcc0e9f44d428211a391d0692db59751906cdfddf3f0e3ddcdfc037428785274453193c43a172730066ab42e407d350fcdfbd19b79c7282cf2ead0c8dd6582197df211963582af5473f86d9f0de4ee8bf82e2241267a9b34d6402225c689451292585aed7bf71dca358f8bd51d91f4be366d77a3eb3bb24ba9a2baf43d226d0f42b6b5937594bbb9601666732149afea9b01308cfb3e6d62fbb9da0c420923e18bedc06eb61b1ac9227759100455c1a1d7ecf4491f3c9536f8a0a132992042a0eff7e5b0eab27310db5ed9072c90d40a1f7c0316c2070eace3bdd37ea76b5660ba0e4e805947c4807598506646281e344952ee4b0bf5cd755df4aab586fbfba2860fd05ed70c88c233411392545f25853fa0ca5a6a22fd7c0d24d3090de542365dcbc86ee4da3d5a0030cd1dcc109cd8228b80150d8ca9130f9a60d2c41779f96eb955789b4bd8059faaac0cda3763813334177b18a90da3cb0da05267ddcfcf734436ece3096ad47c5b4a4648f9ac2f8d0b8e4909efbaf79b441c1fd57e173c842c388b37fdb24dae9f35172223977285118eea4ee6539c219f165d9c73cb49ae87d0b5ebfc9a51832c7713d76f1f8312716909ed04864aff390ad5fa444be5732492ee6ecdc68bcc51d6d0d244fb27cc5d37c3ca048c11b9e1746d0dd93c2657d73cf4bb6b8e1c1c078527989868322f04d12d84794ab7d7a86cb7eb1fd316bee7286fb8eaab394ae71db02bdeba08820428342760c1382ea02b2b11f69d8eca7208756408e192017ec55f94a2b11cf17b7d7e0cdf5963700c7e073726bb9a0a7c5a9c6538c346e8c51c8ee624203cfebf89640004e13a01d0abe5ec851960411d2964be9bbab1349b77cd08ff2f5f3b522229ddc6e738d080d072be9a6df16021cfc8603120c0614f735c9d0a8ee43756c8b153602aa5387f68127ca53d146597a7c36dfd3bc6948e3f24418affc377d0c9f62f86307091a5523d9c1c81b1db6c5ee4000acfbbd98b0e2766ee028152ebc77e2a2119702d874613f240543dde82f0ac5786beec271f5495b7bdc4d7278863383328ce8ad6f713c831bb7af41ff07288489e436c2f31b8710cadc0ab03b23047ac4d20a3daec1ab7d16d7faaff68b413e69b719c9708a758bce501bc0ace88ca7c72b4c450d5fc6dcbeb036b2a3ffcd6c6d05fcefcc073c52f555a04ecf8c7659133ae203b927aa389e1c7a74cce1e617088c7157b14630044120b58000b779386bb17c8d608a584b24a368d2a04c107041a20e7689d9d9c835b73907de136dc540945305ac22137731604fd9e8cce64fc2c65ca4fe48b424ae02670497c12bf6c7ba006d434e1a70110597208aebdac009a5aadcbdceb9a37494b0fbc69b568d4ba016a00f5aae3a57e2b87ae8a77ff7bdd6fc810629e69368ab3da4e6c6d3e92aa23df8b3d7cf1098206725ff83d4f34cd7a51b64d0aeb37503e8c6589f95f8ccee843a63907b2b6589234cf95a19f1573d9edf4b0bcedfbcc954c07362b7f1773ac2a7d0c75b531f90c599446c343360b8fd1ccd5fd9506331d3270fb6c81501088377769785ee5036b18eb14687f503c6df9075942a58244e94aab740245c132c1f6
b5a74075f6e1a0d6cb0881dc9c684e19469b67392763662708a883cde65d7e91f4e7a5d5c23ef57e0cd77d9c1517502df57750075c945202bc27f1ce68de66679bfbc978c86340678d73e1d05a272dd5a14b02e9a6316f95c52bd89a73e7d22b2d9b4fa935a53bac308c32169af57a63b9735248ee346362dd773cca5fa682a74778751b0e749f1871f62fdb3ccd71b76ecb61be7c08467e4328ee6dd5653e0bf6088ba708b9021af9c46ef96a5b8ac2ceb5606da1b9d9e4209713cbc12fe306743bc4af2d500a73cef7e28305d65fd34ad2a22c72f6d134e1f87ff66c049565c9e09e2e5081283d22e383b520947db76830db840ba6ae6ecb1c5c15cd71df47e52211141301eee9b26087e9e033ce59fe5dfffd5af89eaba453d3fe4f512c9eee430932252acf9066057f499e452c7b5267acd52a1c43e177d43d0ecfc4378cbf701adae9243b9ed0db8862572ff4e27cfefb0d8ad4bb40730507678c72ef03dd46d0b534ddbe9a9a8732dcb493cf3ac9cb38d205a935b88d67688620df25d526daaac65ef23101efacdb38f7c470d28bd3bbbffb35e4e89584b69c57ddd445ec507263cfd67b4f2d00d5ee0730120ae8e1a3c2f053c8be84ae2d42b36cc68ab8789805eccc40886f97968fa830f7f36913a5fa39e02a58df3c36752e28b5eb10c7c3abaf7969302c1f47b3c4e6554cb2d3cbd67e8f0d8ec71c43108409d723f06fe291dd9eda7e046cbc7288a65ed4f8a10f0ce0a84df61fdedf9f5c8c5d4c6574", @typed={0x8, 0x20, 0x0, 0x0, @u32=0x4}, @generic="5d408cd386d05a267ca3e4cac691d896352ff5e0538d4b3cbd3fe502159d1fd36ee713d3204a941825a989bd73f2ef2baa8649f5e1a3cfdb8b369f4580f7189b04c7", @generic="18393325c3226ea0fa11fa2741900a047e9d710d1b4d76e2075bb1f4ee29e17a43175e2c0ccd544b83223b9d725b765511c384b27591ee3a19943f6b417a059b48d6da5c40a5378bd8672b00f94ab6c5c7382ecf", @typed={0x8, 0x1b, 0x0, 0x0, @u32=0x100}, @generic="d8f99c4f0f1872439fe024b672853fb4f5f8ad591b8c1d1802f24c543e7bd38f80aa07533515ad6c0ecba5d950dd844ab782fb1518387da16bacd310bc503c5dea1b1ced0162e475bc60a71f07a08527c79c332a715e030e9f43ecd9121a29918f8080230f88a664d7dde4d3c04fd2", @generic="004b52f520fe2a84a261f616299e0dbc21479bbd017d6371240788baceeb8dd74fda1cf330f5fc797a79a878a522e82b6bccc267f35e66ae772bcf44cb1754db9e16ce19d443d5fdcc8b6851cb2b9650c5c9b143d1730b40a80ac962d658de7c107862dee283b38790bc61a36ef652d905c8ade14fdca515250bde0a90819de48e61b33ce6a65d04f55aa9d120796b501fcadbe459baa6fd6312279e29e953c08f2005ac6f4f17e5cc7429d55516304525ae2e09944e2a0cdb2ef8006b27583f2eb58078c9193299841e7256c9518b3ff292aba7e1ad985f16e7b5b7891d8743fb592477900d2ba8716ee2b350ff91f7c952722422ea64ecf6c0a4e899b89f1e489c66200cb03cfbce54b6512c701205096c08c47dc75dff7c447003f7d1ec04db00263b1a8edbb0d7981ab9b0363cc0c9b77126fd0f18f5ee6652b8dfe88028b83e5392c119da099341f2611d614c84d17e695b64100014e3cfa105709e9a7d6499eafcd8ea99b5c9657ebc51020365c5065906aebd0fea26d63c59bc09fb191c231dc61b8201693794acd81c8ba9df74986532da57b479526c3cb02dd16d597900451ffcb7e6b3cab85eabc16586c61c05761770f3028d76bbae426a948171bf2b4976df6eaa6ab213b9d0689628518ae57b47b1f8e9b1978fe0f9901981aa3f9df4a0dd0c85d7ccf5156926621724930b7196ba30ed4f5fe3f4226846f83e50e4de903c24ddb041343b78781a62a6717eb2138c1a7be2d41e313b9ce3f30e1a21a58565e973310f3895ad61e2ed08f1dd620bb0d6ee83cfde39a0e178d26feb3a1675359d5305cf01817b4a6e69004cc7c6faf799f7bfdd5390c499a4674dcf24748a5a8f95a376dae6caa47e5bbbc5ff56eb7a3c5cbdbc521f4ca7dfd95240e2f98afbaa378a64cf4ee27ba53566e6de8a10248131789406d877ef716748f542b23975b0558004babe08dcc32233232b44169207f88ebd941a3403a0e1973e7fe4589b237a9249d75f1d39bd6e873a6d0c98528583d076901e4a52067ab6b23e2162ee0ffcde0b40610072fc6aa0751a814e5b8409e8e9f3933f527a23a64499b7e0e6d215bac488b94938988b1dec7fee73de6731b8de3935a6a0d05a915cf1f77b2dbfb4ff014675fb8d2722e5a0c6c98633fd498918c5f01fcbe926e8611654f318dc7f5ff1b192899f726f9cc15d5f385f998949c574ea1a34e98cb19db2e8d2aa3395c51494a5bc34acaa71bad8e8f151b88e11e7e7dede9b908236bceb3b717956ec3
355d70cd083e39fa30ab109f79914a025f1b6872dcbfcceeda539714c29ff8bc4b98768f6d93cf538c75f9e174a76c53850a5bf2ea7764186cc0d281c012bbdc2f43b506c370605a79d98869215ab0ea16986311c2634d78aaa5733281b53eebb6422693ad5fce5ef4f1871fcb9a2884e4b48f18ed1e056e4a1ba397d064137d4a0479f55a5d84040acbbb027d3e95f8b7672ca23deba0a4b4835f5cee288d2e3cb980a9e3dc822e576d839a87416704b8fe13f06150ed262829567a792b8d5157e2e2e1681e30db9f3d2ac0f23062cde43e6d4facf97b1c080a0ed9c5d3c49ca7e01ec6235978ffe3050c675ed26156ae446cf419f28bac5925f5d4ddbc5ea9e4a7d943fc0889974131ef7eb4868d9bb118e77ed4855538ade21263e95ec1970277232d19c34fd1ab56c3468e620bf69174a4b019a418ac55d3be9f8a6df193b13376f63ac0232078623f8a3585de97a141488f74b105945899723813d4d357270b768adc15b9bbd69356757aa4ccc83c23d9a58ac3c44da310151e6ac0e238c090951cd52f413790fe5d1cc85ffd0f568bb7ca2f587707fbf90134c4b4e7ca863d59003e04c4244b855ce475366584874e9e25da5b87ed423983a27b770f57c8cf0e1b2037faf477c04e4c22b9ca1be09126e8190a48a5253abb293e138c0f066fc2f0d935ab59d3f6f14cb048ff42704b4a78d338f28ecef27ccf334a12517b159425c69c20d5a8b5ce5914b244f02643a9c6028e771fa8a681110c8c7c8ca6da4181660b8f215e73d5a85d81f36dee4118d66bb29e69bcd9d3ede9c7be5d3fdbc0780ad45b2449d452c3bb135f00122f2150af42f39eef3c19472f945903e5121e40bdbeab364f7938ebd95de722629f8e433a9904940f7da8291573611860a2a337495a2750b39d0189b5d71477431c838f843937ae0873802d2f843c1c431b927b3a9fb2e0d18462329f759efe52e94f1b638b7d6ed9beb7eea9b4221bd6bdd24b8598aa7cdec9594f3ca3fbf40c57d7eb39259635001b681ea0363704206a868135ad1e4360d04d2867d40c4376331200ec89da5fed3353df654d672c0c00e53e55555749fc429cf4009a5edb4f8f160576b3b248ff0f203774a2139ce3192501d4336a065cd37773516f4a50bb1419ee7ae117973b6d5d9b8c221eab5dfd174c2fd4a2716eb93122c04b2e5ca1accfea6a8670a45a4fb9307019bd43b7eb731d5938da34c0deaaddddbd200a2cdc80e2c612e7592851dd123aaaf4e9a985bc98068a6aafeac2b4368b1169f28d0552cceb51f67d24623c8bf7a76ad5baba02ce5fb769a000480db5b30e1bdd6d9b9b073c30341b8a43bc860762ccc5f9c9741b312d37540220342283767c5e4fa0636d0f59d65d9cf3e7c4102edcbc84a164bd0a7a6d37336d38307e9d458d9fd824b5d9aca50a4c720501ed4f9e8280cf334015a0ae25e54a27d915cac1fe7a9a26bf89dd3299837fe5086b1fe217aa5d9a363d61a83a194d4b42f14ebdf42e19f14e1c2cd88f41080cb586ed7e50b08e14f314263aeb9e2caa1dd47b649ee6a875ea7512636173db78747ce2ac997235c296869b5c295a1898cdf2b509e38761b175abc04872db51ffa75a4a39d6d46e5df6fe553445dcfdfa3aa43e08d33ac26c184a94f41789ceb0adfe397ea9de5d7d6d91368640518735f7af46b78cf4dc0ab201434648ec299f3d4a0c1200ef7bdc58b5d7767384b33f95e3fe953f55881cbeb435c146c1aa494995e48eecf0f2e7bcb726aa99fa9e8d9d9b34a61fc91820e8f6421875bab39a67b4aab9989922535f559c9d769eaeb4a4778c4de17bdfeaa6db831f77e49d28c18d37e6a8bc4ce5ac48345a1162dccf11ac5703fe7f8238d36bc396b855815bfb892842ee5eb49b36eb2a70495f8e3074899320e493c28ef17cfe76282ee20c1f5b4ba7709fcbe6db3fcc6c5fa400cf790ecbcdfc18883b394a48d45b287a401d700651fcfcbc7385b905d6955652c54106a49af4eafe5cd8e805ec6fa32a99765cb2d966d0420e8bfcfd1652c0caf180e91c961046ae47e7e411dad0801ab7a6015c8a7f6877f963eadb88072786f76cea7a063edd706ea26bad209ed561d56a30d8d83ec5021023391ee5ac1985e0c46b0fe71be253bfc31ebda3df0c372a7544f60a50c9dbe670905ff228cdb8694d65c088172434398f9f89b817ba5b24cc07444247baeffb1bd412e711a818cfac48098636492fa9f5062e81e7154a90ef408913989fd43f434dd90aca158db1da88cea5e2c6836a74e38034cccb64801db41815b1b3d712a4921b145b6246bb11ea2d4a121ac68b60b87265efdf89193a88f0d36849b2ce789af272fd9873bd415c2e0a8006fdfa8fb2918517ad63085c3e84f54b82ffe160ff22f17a9c5d487df1c151b01f562c839e606e8a3e493f6e6395a608560934ce83404b34f928e8ba6b1b4011d119fd9524335e5369173e51a72440eaad3b775cf7afe30d9cdfac79a849ac02f12
12a6ac82d366f7ada23df0de9a9b5796fb1410994fe0556322ec9cc0c730f3cd48df7c3d6a8de425ce5b35977c17783008cef67160d3cdae9e108ba9c73a7cfc79eaf519569b8cbe1626590c6f6872920dc512ba0c87c7592e82c13e6a8ecb0c0cd2585664eb57dbfe5668542674f0519d2b0017106d36b8008f79a01f991819e7dee37a192f8f88b184859d0c5f65ef072d3950293d8bc9eaac5919cff52ea82862a4c31aac87ef46f4669af0c69349234d5aa22eae4a5ddd781ef5f69990e11f16e5f754b205c987ca3c99931f18bf6216c500ab0128b0db24e2a3181ce9091428f1767ab63af493658b0cd084cce44afa6ab0ac8b4b62e6834f632293bd90a145f62d41f96282656093c71a08c550fad2767de8ff72a6c5b1cfba3f9f053b157a562e4658e100de82026567beb77263878d34bedc05549c9d02d6fb6aee7e1a2f89c97b4d2aea05411e192db38b9efead7ce3731733eacb5cd6587e2e8dad8f3eb1ecf7fc4a3a0183d222e4772b897416d85779f1c7de6fd4b270d598b733ea97239b998c12e904c103bec49a4718cba42011ec33b01b8375051e0071f15f8e578f226e9b9235d7dfb4ea5259f2729c5c40346fbc04620def8051f81d2ec9f171865eeefb64925fb9b9a05111d6020b6838973d0ad6717ba56d103ab4cb80766d9eec5ea8fab94383b4f0d99db716f09ed818a5543e76c8a9286bb4d1f3ab8c4624820227617be7a96c9fb8dac2bba6b0372544ebbf370a447ca9e21bfd281834b87c658be4069daceb291bc5009988fdaf5ae195ea708b8534b66685d0564cb6cac21af46e7839b71047e4a63ea6f3e9a988f9541b25b29fc5b9da5dc69e44ab2f4f4a6bc5d628bb3a71f3d5c489c2a0d700cd4d5f6d279e439a52d8447468f05342e45e16963aa56c2f874dc8bf02e008846f663bb1fa8f6bcbf84097c102c0bd76ac78f2c641dd8274877204fe25a0129caa36f5c0b7991d1e781e66fe45f4b3a420c79803b97543a7c5e96eccf5f18997cb25b36521d859d9396a2bbde0eee351c6cc9388f501427a6fa0a4d3adafbdc829ea72e4659c9d83e50845df789461f2c52d44846a2e431931481b17f9e7e853314c843ff3069f633848bce2d18ca78cd74ab4460146de10c8fbdb5f3edd34647d8e3bbf2a7f4f034604ce14c1797329111c897dd51791043a32691b51b666774e655755fc891b32daa136dfebea17eb815cd780854567e0c19fadc070bb28174fbd128b88f7757a10f910da0418ce1e59ae4e3ef02903ab590a4ba7fbf524121feab2b3333d7b679a75cce3ee8caa83d19a5141f218a9987f83de966e7fa4318edf62bd23173501a21520943dd68d0a99f471f6438abb0296a4a8da141d4c2c95e7a607d76be5183f7c1d5863198d2223de0fb72725971d748182f66201ffcf8476e716fefb2043eb60ff322a3dd22c63bb1f49c31a87efdcd87be13cb8c9d1afca905173eb7f3c3660120c888046d74e96dee3f8ada6f1740e4bc74a0923e496aebc30eb6e5619c5947b51cdddc9930c78477f43de7029a1fd1abfb39992197e4f935127fd8346d52edef6617539689eb0e03fff6928d30d9d5ddc8f230f5cd64093e1c502ee4afcbec7c5f529388ea333c3af387321913f119399d8b373b0b4431edac324df6a312960975dc5b83f078e63ec6a4484600dd81cbfcd9814a3d8edbf03e1b8d183f5452123419eceee9d53b68a795d8929d6679ee23415d75b57f862989487240ddbda1811ca6860318aac7ee0ad439b6c65083c20396c9dfd885e8f0a4792f08f5e0b4285b62077ac48763d28f3806fa7b6f05f25d33dd97c63587cd7e2e139ff0e42d6c0d6f05dea9847cd49c6b6934426a42d07fe111711fd01fb00bc5186df5ba0cc2cb52e5beab02a36b2322ee9c5b9696728018ba5f37d7e5cad1be0615632e6dd9353148c5846c2d3fe806a16b58ef045d8de4ac96ab523efd4459fcc611cc97f2f6937afe7a78b86", @typed={0x4, 0x6e}, @generic="2f877767950e2655134cdd84cc3dc0f4f154281a9d9a44b24ec43c07a45ee3607585775e0143b707aae48396ffaf6d454948999323a30692df69b61eacafde3e81efd89a23e741a2fb0233300aa53752c198337d1726821307ef7b7f1c3d60b21e29ab35e231ec2bace3f06f97daa08123f02b70133da2b1045b52058caa346040298112bcd6849dfc61477271d54fc0619de4cfa3b61debde231cf3d23f4fa5b4accfd3c144fd91208b343af00cb92326536ffa60dd962f2db5fedf61984034ad5272d426b99c9266d3b808af9dc820c8a2d03e2eadc8194c1de53c22d61447eaffa74c15a0fcce5824211701"]}, @nested={0x70, 0x5a, 0x0, 0x1, [@typed={0x14, 0x79, 0x0, 0x0, @ipv6=@remote}, @typed={0x8, 0x33, 0x0, 0x0, @uid}, 
@generic="4080318b24064b55128ad517f5ecc6bae86c92889e42a174d227f88740e266283513ed4ccaff12da7b351e2995058ef1b130e0eaa22a78d459868f2c8d21ad96cc74396e", @typed={0xc, 0x51, 0x0, 0x0, @u64=0x100000001}]}, @typed={0x8, 0x15, 0x0, 0x0, @uid}, @nested={0xfc, 0x88, 0x0, 0x1, [@generic="4c8359ee1e4d9ba13f97e3f2fba9c0b40709a96569bdfae77e55938113052fcbdae5e87109398752f6d8b99b6fdce05873617fe9fb7dc3006ff50e6fbb05e4cbe73aaa389dae8a2bc29a57f7358ac0adf4130f91d887b5239ebdb5d4722acc5279f78c9365cc66ce84b3bffa93f66ca4d329c58340b0623f3fef100e01059c84d7d11b7f53c64a8f55ce312bb07e909925a1240b95ea1544b77138835711bc0691d6c414801fe51a077d9404914efea4e70f5ce18e7ccf5937f7c1a30b450c5156546548cd8bf8c1bfe48b6414067e47ec5f0849", @typed={0x8, 0x43, 0x0, 0x0, @pid}, @typed={0x10, 0x5f, 0x0, 0x0, @binary="91f2f6fbb55d6dbe436e55f8"}, @typed={0xc, 0x7d, 0x0, 0x0, @u64=0xfffffffffffffffb}]}, @nested={0x1014, 0x36, 0x0, 0x1, [@typed={0x8, 0x56, 0x0, 0x0, @u32=0x8ae}, @typed={0x5, 0xc, 0x0, 0x0, @str='\x00'}, @generic="af8c54b2fb5bef1d5df7d7093eb07a1fbdaac1505a52fc525bcb6a775004eecc7ef71d3457318078a020c6c6d7dd3fece43027ff3f1517c461ca5f81f47db3dfbec592519306ae7c8aafcf81b91223ebb2c25717f0e26619886b6edc358bdb8df97b575a93fe1189543790d029b2ef2c70c12da95846757a45d23c5b83f6897e1ba04a72e455344376d04e6e1be855e0a9b81b85431260ee0f3171e77d5e8b2ce039d5e61798942033cd82300763628294f0e269fc726cdfaade7f890c6838538da2595bf209d531938be8d974e3eac3a60bb0aa0e61471e80f89ed8d3d4aa9af6582dec208f254dbf6b960674fe2db6d390b6f37f99b0f9bcb6b66c5575490ed1491b1ca3513ab28c80fc55e912fa9181cc0732564798e6d075c5cedd8b34d40ddbf85cf29ae9df4557752b2c570d9832e8b4181cd68713f9b425f692b1c2275dee6aae5136eed160d1a740d967de33c9b942565202fbbdec45b5b34ca14cce245071e142b27dbd11bdf62f89299eea1ed2020d7d32bcc65dc6615d22a2a97bc0d58d9478ddfa3575d9c9a9ff56204e015b51acb12444455278d6ebf3f2b9715ffc7e651498e1911abfaf0181a200af387624eb2ace09cca48bdf3e37983a26961510c88304ef00e33b2c7e2df47894341c1f28b5290097d59ed0f42aca384f0b7397a86ccd46d38394ff1d1fbf34b2e4b76a7a2c2e391eb8fb8633ae0d278b109d4a88eb8c1b96f105e1b87c73eefe1a623c121759afa771d0ee514efb87255689b0f1a61b4101581b26ece2ab0d5492568f123f10a7ceef7e47ff659efdc096d005771d2e8bb5a52afca0bd1eb2da634a322636f07dcb95fa4706cd06dc8dc01e6b3eb2411f59b4c34ff464c7c00979e5a0cc333f05d9897c34463378f51de23b075a3ffeaa5ce6238c383a618f8dace7d4cda990a25bffd55abe26b336de1394ace4b35fa1cef521f98ffd16de6c34b7146e487ab269fc899ce58c5c2ff92b8fd1b8ea3b585d04345c08f126c5aae8c3be680ff97e35d32096af74b77293357e8e047dea6d7767240decdc80abe10727f9dabaccbc5504f47b7c7baaec14ecb99d8213c04e6deb3a2c8d971190bb7cf30646fa576cc574718fee21b4c67ca173be2f1bed8b423259aa623539bf2e300734be53ed3ac1faf6f3fac08d4503acc043afa2655e673a8685bbfd9354d1504cc48411ce28476b5b965115072c83237f67d57870a3094b92503118c7985d5ac850c2864a28f0ce7a922c7f989bcde4714293661c1f4a30fea7619d4010270696542331c612a4100c1a1feafc89ed7c169626049861022da615342604e7ff9756eafee6af1c8d14abd445304ae502bda6985621c538c326f2b8250e2b474ecc2d9d4feaa601b62466927eab9aa2867f76573746092ef30f9dc99606940674ee862a3f28c230bff03e1000df96f20ade868c0d739c5dd615b4f5fc1f7b5a248b11a5c7501c8df2f8ca796426d13726d08cbc8a4aea585000a1efbde29948fa99c09c74581c8461e0034dd52d6d89cba1c8afa08c99c0be55f91313baf43f2abb173a6fdedb831663c0ba33c9241e656e9bd4af112c651a4d4c1413c4040547e660917e2f271814da3ae39f2ca0edefe70ab9f6dec84c800bf56675d88cec2e23748482a30eeb0ec977fd0cff9b00a6f80c0e7152bc7bb7ce0cacd73e9dd6ed37ae3dcd178163f7fc74ade33b0292a71d03959c400609d151649f380df1545ee85e73cc96269ec13f5b38eb9d0449298ad388dc4d9503e8dab09fcce03a284e6d544301c7e8652dc9695bef4a3c721a089
b6ade9fc99125e0eacd0d17bd0326c1f2f4d48825055a60b19982594e7585b45cc98f03a97a912754d36a8aec7c20ea8ff5cb83c9202b7097516475552d7d7827c41a5ec694ec85eeef2a55d84f91f29ba36a6c48eec5b2d1758ebea7092ce358994e2f2c496f508f4e514559e09b5975e2cf077df2d98bdd3a35c78a81a7baa58fad6cba694b6d849d19d8ebce1ef61bc90ce418894776fff026021e9099ee672427e7017625954e33fba0546c51f3bc911f407b04e713655ec2df6a20fa6c9e83b53dba8baf894ca5c5abd360ec6f6ec51ce768f6d74be4911ed1a8be38d02af1dae8b571cebfe7bcd62bae2c4fbfdef7baa2603914f4f4d089c86a27a465047b489928ea92d3de9e82f4313c0a71f29a518dbc57a6d0b430fe58fcc0c0cc5a491699c1e92c4c18d6a8ada09d42b351acb436045f9ef7b9d37f052d72fb9642c63bd5b1bd2fc83a2e57f97bc91c04a367eddc8e85842a95efcec147e32d177ea91606ca1f0479a8ce940a18cc943e8920ea6abd1f423acaf375237d80b12715ff2cba5fcef37b7b9a1dcf10851a685719e8c9f01b494ebb59ca8761b670e6a6bb56a5a5d7df408a12aa1b42484bb029d602036b18d9e64f82fc20beb9dbedc86ca24ca6955234277822c6e4b1075afd1db3dc56ce65e96decd26698c347f72965a3e433e901cdb25969abbdd6e2ad55ac94e8d4c43a46bdf4bb722c54245bce504a761e180820a1cc0e269f575f52a3d192c9fd20e70780c2d98486d85a0d078fdb57978ea80a70ed3defac5ba81e3f26df3c38822987850374acf6ca27153b2ef94939ed3a4f9786b4b92d7ce050d9438e233bdf8698242a78936c96d2b86e8206b76eb4656ef8a3068cc46ea5200c53313c0c247fb5122a27e3809d81f108182393e54e47960e347e619f9d6a411de183f6da8a3f8bcce00f72fc8efe412d7178ae5ddda1383de6afa3d6f2a90b8911289e946a89b0b619d051246f2ca34a77062e4cdb686834a1c75600591351cf9773203f7f202f6f5d40ad478c6cadeaaa896d45a13cea8787c4be35b6b963c13649bc6e7b6001bdd85c873c57c549d17c94965efa598879b89410df7e48e1eded91c57b052b6e7f63ab794799c3fed6580f4c55acaa33ad466dace47348c8d32376444d6fd98c9b8fbee5aee52bd30f190f95ae064210445c2f22d8468a050df6d05a834ee12e20204f1ee1d2e2e5cb22ddb70ecdb26537a61ea9d07707c552bf10c30e15851a9ef7fc034c843b006922ecf3253871978cbbd8c880c250d29dbc4f27f38446006a8d39abd49b33ac0759b634b3095379b923d98b3c1992a1a91a4e0633e20737270ca7f787040a27b5a018f2be98e4327f4b24e81eb9adb5c434e2c34e8e16325ca9c544d6b1812850fc51d2ee33fb9cc98c2c503262f4c4c6d2747a898feff843520e2675a59c601e17dbef934dbcea1368f49fd08708ee1fd37f00ae37e0a80b76d853c9bb6b104d8f3a375acd3952e6a12a902a4ff4c01c8935651bbb96182080ad056f2c6a722e7608aa2d4f3c8be6053a7cbc90928080a402e359e84dff0400cf5c77fda25855576cee728b6f83fa2f47ee75412485b65decdad7f5d53327102ba5822a840e542e16a751b71ff507dce6a85ef5a37a26495f25754fc0f69b0067cf92a8d88c01970689152dd4dd871e476728b4eb0df0967df11339a277a332b746210ed10124afd1bdba995100db2d996c652d24167ae6dc24173d4df75da62fa4869faab5cee7c7c81295252467db0a65e141f29f0ad4f22c5d2dbec77cd9ff29df30a321353e81d1582bc72ddff3f6e4191eb1267147b570591dfe7203e4ba180efc2b52662602cbdb17a01449d2ccd9acdc0c3ba8c1b079b77bfca8af264e721c97fcde6ba6f77b0757a51d9fe43ee2bc298f2ba8783376258d50fa64438c77d481da64fc143b61deb271c6b79910afdabfdf8f46e71de8f49df9eee7a5a88e88393ffadec6a941878229827a94bd1018cbf25832ca7264162b43bb84c6c1958e8ac8b39436de0d2cb7600453cb2340161dade582d1f289d7f75cf4fed63d29390a9ab62fcd4d8bbfc69ed91ec2dc87bd29d324c13473e0d240b3f0f7d731e5f1a02d8d3867da025c88d5dab2356fb067d67248721d0fbb4c58968fb2a187df0014eefaf8c009a635b1a778eb56643b4371d24c3b8445b5b46062ef3a87ea7de8234666c6893a73083d4f7dbe27cdd8d1bbb2ef8648e9515af24afdbd0e16a38a2e055d37f9949facb217c635dc7780d4105bc2b1df20810af650e86fad0bb4885989ae428467c7c4e178646358e25dfedc70fa377c14ea99be5a7c3c8fecda6a46d02d1f801e77b52df0e08f1236d3de7cf95b84132cc4fc1d9a490227992d3c823d20cf3a75a0b353b72ac0bf437d26210b4858971937f579b6650522279b3eb875033cefc06415467a572345c4b36a5d8f51141ba8258194fac6ea49675fb142372752d8dcc497f135bbccde8090db941b54d697
8c4f511d7f71db190502583f6273e078a724e920361daef908af93ec22de9175b21ce0d8294aaaa29c86491473f6fc0d41004e05603d1dd3f34fab0c39c14da2ad51b15635ad24eecc53777c7180d74e10fbe67f17b2d8c873f8e47dcd10312a25f2cd6b7994a0340656b17831910132b178acd2242fdb659224d3397ffbf5f8f4c1e4c2547c491d71f05a2a2fd6203a47f00139d2a44e096999b7768278eed0f9d5f3b2d6a006de85526ada5b65017b8f331ca23a392e410a5656ccaee69802b19a2e044d1b6d4d3617ff0185085d93f9f8e967406efe15d1025a1d48bbea59d7b4020e3462fdcbd44decaa586d17bd5d2fa7aa17745aa7597f2aa49a6af2c53cf2efdbadb9828e730e1aa447b08ab5a4b807b222d23cfe5a1b1f0624e545d2017e1b451435f3ec2241b92ea351acde0171e37fb1470d872a5aaf1c38342b6fee134002f657154eb9e4efd99a712827a50ad98630e7e2939e332ae9c5e39ee70a5d547d0fc9535fa7740dd278c6d03ad15ba374e364ea046f49c783c56d1546e766d90fc15eb7c6e37a4719a045842f6cfdff343f13a317f542ac0c19b61f0a8ba1e34c3690b365acf3c7bdd4ba72836fa6870b780b7c7d5f5962c69d0c4422fc992a6d8aaae739f592afe07af06a00c9757ca669b0744faa35f36ffbbd84727e92b643e37dfeeaf03b492b4b673c09d18ce2f3efc474339eb2cae605dd8144825a4d42f41a9cf4d4960268c469a46765f045a6056d78c50c9a2f25eda048a4ec9a32bd1727f4649999d3950a61d4053716c4969c387ab89f8d9f497fb76d55048206dd459b9174e6db9ce7448b0c0734850475bdc23595de94790b2d7109b577f72758f80b01e896f7738d5ca55b450bf37d8b9abdc35b0e3b612fb00a3b8357b133cdbf8e07878bb85e3b7ac27b5646366c394ddf32aa158e8780daeee832811d5434e16401706a6dbf1e8cf1052f057221fafe0f59265e907195cd05e497753d11019f183f83d410d294589e3ff69ba96c4e9d660f795ec9cc2edb70e85fc1ae38d022b2cee640045f23451cf7cc6280cc89e732866783a7b2cca3393835dec53eb893f5a2f985098c254b075810779ffc877b235ba64a2b62b04e5d6c560293f593c273baafdc0b9bb42e4a34419cd501722c153dc667f4b71c50f4bbacba3a25f9f899034f058b37f8b79775ca84ac2d1e529f43ebf3107f3751365049f92361b22f3f51a9a3f939c1bbb826ae6e7ea9492b340a5b03b5fd9872a1785dc3273892c08eaae69dc86904815de93cf53a30e515cc1531e13ad5966fa531696a86386290622deb8f056822f203158d3d5aa4851bc4f8a0a4955684022ad80f8c59ef30c4526e009b6b63ea5c9e40b6f319b1f466891daecd9b9497026de4275c74b82ead542df3f4a5e26d73609f545dc0e278e4c6c9c328ed1079d96426836e45cc021a9a98325b4b648686"]}, @nested={0x58, 0x63, 0x0, 0x1, [@typed={0x8, 0x70, 0x0, 0x0, @uid}, @typed={0x8, 0x55, 0x0, 0x0, @uid}, @generic="603dc830ec7d42a2a043ed4bf0c050438d1999ae4e69bedc634029970dd312e09d5b50afd003e8b8", @typed={0x8, 0x1f, 0x0, 0x0, @uid}, @typed={0x14, 0x1e, 0x0, 0x0, @ipv6=@ipv4={'\x00', '\xff\xff', @remote}}]}]}, 0x3498}, {&(0x7f0000003c00)={0x4f0, 0x3a, 0x10, 0x70bd25, 0x25dfdbff, "", [@typed={0x14, 0x6c, 0x0, 0x0, @ipv6=@private1}, @typed={0x7d, 0x1d, 0x0, 0x0, @binary="48bdc976abbf150d7d9a1bd6b5788289490c41f0d1bc0c86025714445a231a1f397df36dd120b081afb3a1c7dcd4fa1a4c5a7856c18e28d12c7d39cacd5e2915bbc1e44bc423361f5a298be1952dec43962a6baafd6bd9ef7ee9862ab54ebc1bf4f64d3858cda19e33514ba2652948e9fa59817d1381a601f3"}, @generic="c3a6e43e48f963259391e947f6e0152c0553d93a9c70fa4f1376e83475df4842ca4f21b546a17a7c0e7540a2", @typed={0x8, 0x15, 0x0, 0x0, @uid}, @generic="e469e04a77c06bddf70f1c25d09499ce91a3dc77e977b9255d93b06ca064d92a41839be931943dd43151cb4863f1892d3311d2ad7bd8da3c56d85b7675c55a478ff311614da1c977260b2a979062b9d7e409ae1f1a3c3bd776aefd473eefcb1b38088996be4ab2e7482c47c1f130fe14d7f3a906e1ec7f3ce89654d4fa6c7eb4ff5ce43f965fbd1e409fc3ca7fa5f5f7e37f7bf1e80d20ab9eb065812aa0bb8691f01c9a14df46ab6791e6f91c7760a60b9cbb0349f2b9086e47992d0cf9e68d702b4e1709", @nested={0x350, 0x4c, 0x0, 0x1, [@typed={0x4, 0x3}, @typed={0x8, 0x8b, 0x0, 0x0, @fd}, @typed={0xc, 0x1e, 0x0, 0x0, @u64=0x8000000000000000}, @typed={0x16, 0xe, 0x0, 0x0, 
@binary="91e2466809ba9981ae2983788f925a0f7db5"}, @generic="53d7c017b6aa9bbca8fa3bb0ec1fc26e28a9be73babc2047b5d0fe9f6053d907c2dc0c402a39af8e1affd55d8ea112656af6b2d8c8365ea81375332330e2f3594693a7b5afc6a0cd60d5ed469a0e1fce3ddb32d7aa3318cfbaf0d69d81a5ec23046053767243c46c5d2e1a5c8ea3a4e24845bce1265758ccf85a3d32e16c9b5b191a580b283edf3f6bdc09d7a8a83333e8c7f581b932015f221f99d2b7338494d0bda53a01ed45073c70fe9f", @generic="452e09845665c68d8139e15624e5b0a6c15992816259d844ac7923a592c7cf224dbb1a80210d99aa62ae03041a6714ebd76005e05394bd71bf7ff5f34a10d0b9becccad51632f6790882600ac59a09b6e6c8fedb646ecf437c8fe22e165ac7f70c8aa0bcda453ac84c67edee53f0fee21ffaab88b5784f6ed83118204b0b1a1d2fcca745ed8ba1fa1a6c110ff1825e36ab33fb998e50b2ae18ce7109e390d047cc848817429ee15bdea19226b88ffeffa8fb70ed0485260bf30c11c83113f66b466a935464d7c0e58de4833a45f41e055db92d4f6a00409493a46f093b79205c66765123e2a2e6889227b40851989ad83248c0ba9e879a1e819c0bb5eda7b2", @typed={0x8, 0x59, 0x0, 0x0, @uid}, @generic="00474dd4ad2dd791bca50f506248ea829efc6eb88a58e1b48b8ce44683f3c6b73c24568c636b1d4a9e48740ed165d1302e6b9dfe4910886781582a554f8f437b1f15eaf0b9df5c54304b8dbbd256555878e8f33315d59e8924b8b7013eb83a5da4cbe96ca1d4b6a90ca5414833631a52b1df2898101affa7fbdd7f37e8579e05f43ab1b38bfdb27a19", @generic="e5d4b7742821aac12ccd2f7ec93fd67d9ba3459fceed399e04d9dd4213d7526c2752bae4a8c1a66ec1c40bb02985edfd2d2277d47e9e874eff8db9e1c9e706846f546b0617a15fe9a809d793391e6c79a2fa1e06d2a842b3e96814faa35fb768160d7a9764d1a06ad97172a9369393d5a3649c468ccd3d04ab3140da9e640ddd723af9f2af57a01acc086b097228416a697ca1f5aadcdcae236761b85d34bf182f4be847706e26b6ae8304f002d1cd08de8e0a0b321cb0a43bb90e27e306576f83591399e0e5a26c299a36803ae0f24f1564d7b8b2512ca0ac387a3051f3da04"]}]}, 0x4f0}, {&(0x7f0000004100)={0x3650, 0x31, 0x8, 0x70bd2d, 0x25dfdbfe, "", [@nested={0x18, 0x77, 0x0, 0x1, [@typed={0x8, 0x99, 0x0, 0x0, @pid}, @typed={0xc, 0x28, 0x0, 0x0, @u64=0x10001}]}, @generic="bc44a21cc9d568655f2098a2bb119cab63281db2b86beefa56c94f77b71a12ba23096539fe2b1160a5b40421557b8fcd8aa25f606487fb7f1d8bdd9477eb3aa3bd2cfeda4037b8001d4cb2cbe92e6b85cebc696b97a0", @typed={0x8, 0x8c, 0x0, 0x0, @u32=0x6}, @typed={0x8, 0x95, 0x0, 0x0, @ipv4=@dev={0xac, 0x14, 0x14, 0x12}}, @nested={0x18a, 0x94, 0x0, 0x1, [@typed={0x8, 0xd, 0x0, 0x0, @ipv4=@empty}, @generic="13e12eba5099012bf22649315497ffe675b52109b7f88ded06bd36395ef782107481c1cad9b738ef7619862e617d7a6972d9836e9af8a06c8fa2e221e6cd462e4cca23db6d9a924038c3a2de3559ff3c2904d72b79fcef9820fd9091749a429804eace74565bbc9bd3d5745aedd582258d4e859040999f81e0333c7b8b7368db5ca011d4239f6d8cb5577f0802866fe0161b018a7c44e698dbfaf98700dd43cae04edf5209fa32bda2a4dd0f87b21f6ba340c3f99e58829c6328aa2a2cfcb70b379ea49bb258a310356cd541d9bf02a6a10452d9953c95cf4ac5f00d27a1ffc1722c", @generic="d6e9d21313086023ffda876a586f756718c46370842b8a1434edf43560c2cf15e40a9267349fbda286a588926b81498758040a9e1b67b49ad138f5915191cf2d1e59464a3b7c4e69a9dfca80e3a8", @typed={0x14, 0x5f, 0x0, 0x0, @ipv6=@rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, @typed={0x7, 0x3f, 0x0, 0x0, @str='#! 
'}, @generic="6d8f0507d0137a18b5f2f39930b7ee7b3077d8b2761fea14d8944ecdd39f034260198ffb348dacf4658d032519f00bfc9f6e"]}, @generic="ca7a7f3ea01c144754cb0a0fa6e5a1d7ba82b2e6378bc40f8027db8b402c706b30dc8f3b3d62b0e9046658fac263f0b45e4f77598e8ec99b714199793cd958a7cbc6f401641d6b58364656c3f44e735d8a87f945adfdfc6a54ca91af4f50f3542a290b52b360672630fc9ac966b65db67268c5abfa59a4f53504ed525368209c8205ca4bedee1282138db844509061249e0b544210097c8258b507c369ab8e31a00f955571bc8d23b600a405", @nested={0x1346, 0x10, 0x0, 0x1, [@typed={0x8, 0x53, 0x0, 0x0, @u32}, @typed={0x4, 0x6a}, @generic="bd290633c339c3490a89f380a56c0f85e86f669f7ddd204d378cd0dd4821c50c3ad0c8eaaba73323c70b7e8f8cc98f6a3a6649b3378c1bceb147c56cede73722c1659c4fdfa943b6efdbb123608f9b3bf923ca23602a00fda577d1d4414d7e5d5ac114959638640f4c25f97f1bdd4f2bb2db39fa1d0c9523573a5cdcc1bd7c2c5ae5c44faaa6f3668d0e6825c418fef5d10e78472a4aeddb2b074e1d94ae467839f6cd86085b3ac6f91be4179e9a67eed45c70eeb691cbc7af028257258db61dc5e55c5486fd366577cb57e8b10ecf5cedf19d92b3707e3ec7b09417e4e8686804d68ed8a2209fa26c6d89df19be0901d8698f970631eb32ceebceafce7dcd350f22fdf5850afaed68cca4c3b45f438773162d11d1dea178e4dcc2e912f7fedfb206c9ace9b8596fb7e06c9b5e2cca3c85978cf1dcf38ec230b793032a1c54b7148e6c1204cd866d73f028a67b1f55c727c935afc9f85e35a9890c6912dcc01313ac9aea0e7403954b734a71d783ae98ed55f5a2428a9ef113713eea8a187cbdd3f8822334c9d70fa295db79f1081148366a8caaf0826076187445f4e9d49825d15c4ea4b8967bc96a6744b1cd7db2add134b93077bdba893d53a972e0390ba9884d9949475e5d91bbb0679fc27dc4fd7488daabc8e387bd5ebd7954e51b6399ee9c41ee86089ba84a1964f47e0b186403cd3d4535599207671768e3840ef40dee2bbcf7b8a3b4f0fc19d9bcc151905bee53738e56aaa80542abdcad3f37b7f98f367fd99f037eb00d394e7e7735adfe83b895d7442b01c71d0f8fd234e99d1a07b82d39b284ac45a2eb51af0e911691aa76ac436c3493cc6cae121c68b80b12f02efcf8e98d4169bf9d0ab1d62161a8837a4b18c72fc23a211c5193939c8fe869b640a04975ad65fc9101cbb31983b7231dabb9af509789566801c860282bd3add3b96c5334385052ca97a684850f92499b80afceb89421403c3b86ff4cdb0d6ee5ca4f69dbcc06af36d2c54f2f65bd84c148b656dc81b4cc188d285c3eaa1ee339d88551bb4915cbc7e33e6de31134a766e36b3973e3c7af9937f8a2c584525bd15445a1701f8b234b2e067eebfdc96a5f197389836a63f37f71fe4a786f55414ba7f98b8c1020d199948ebc0e94a5eb2cc9334a1e424561a2513366df9db2d5129038f7e3ce6d35a7efa9913eeb76839650346b9eb67b31719b202cdcc725b3a76b324e6722fc991071c67cac407f291e3fced78b06264681861ca9d50113af4d2735d1219e791a8eb8afbf5dd9157ff6a81d94d737de1d90942be707d19745902faf8736bd0b7a7e0e7ce7b5f0539ce3828364b26ae2da5e34a9824499233ee4868e24d14ee0948bae130aade4c96364880cd907cddf78ed205cddcced38b607e47d802eeb234aa7917e306e86e08870e3c907b231618911bccd120509e290f60a9b02b43c4e2ef44b0575557255861502cb035bfa5cdfccbd0f408a95851603a861d1e61526145a1f337c0e76e3de1cf66fdc134a6fde4aed7aaa1fa782412c1b4f3ce6c78d5fc596cf3e7f44a1801257c5928aeb35849d8a73b7864cd9a1408e08d7208fb3950a2e5d71bd37b6782735c2dcf76d64a876beb1fe34a5cfb6a9f8dfd1553b13609500fd5157d385cb45246b61d2e9bc89aa61ed985c77e1cdad2f5ba1411ddaca6e40a3bdfd997b0ef2d49ce41b0329b98c9a7735f3c8e18198dc6c10a61b297fe4100acbe3ed1c9d8afd4ebde70c70fde51e41b099954e7fa305d282876dca7079b9d4766183115662c094cd18b7418b4b6ff2a75c4134b342dc37fd4003842c20b740532298537e10a5d95496d67d8a6a7a4a515162e03b51646ba7cf61a993b1910d3ea54719475fbafe3d539be13ea2827c71272d591db6a1482b25281637c1885a5ebb534fe8249754c21fad772697be3994c3d2a072d557f936b2160e80b3a4085fb54fb639bbb3178c42848232060978d6e01278011f20c14fec183efe3c047bbf92d991fc7fdd42fd149b9db6b1c867ddef09a8975b7e1c4f2f401e637953f32b83d2fb781ee19bf957bc4e4b668f6c8084224d237a24220306179d3a2b5317247572f200c
066b3f716cc62ab0501b9dfccf79d69c4bfce705c6126feb594dd9e8aaf154428c09aa9ba668a5a05a5a0dc48f9c9a1851148cb8b2331c09ac857a898afdd1736c540e96822dde260ca3a85ec45d738059fcaaba2766b08387d84f8361ac545c35e3e7374afda97b40e6fd92cdefe212cdc3a2d2877e589d020b3eaf807b5c90180a9fa5d87b127f38f9b60679ee445f3c09a09ff6ddc6b729c348f061859b574a15a54300ff5db3ec2e492f53b66bae11c85fea16899dc513986864cc4f094f78ffdd4995bf689ecb6af9a67141abd17cf1929c5a1fb634c8007e622908abbef2110a63b64c5bceeed0b79775d743b0626b8bc880a515879c30029b73a9a74593c0019716bbf14f9dd514bca45299bf89f3709332a2cf7ac035310b352c3e28e3021a15918ead15fa22d3a156b7cc2748361dbc69530f99aa9629e76c407bb7621f663ceb5ddbe5b12fe4b71793075f4a3d6e60591a2c2f26a961e7308e36135535db1b713735126792e1e8e849a694a95e0c6e03e2053508985d05c3d3460e1c0aa52e56a0330f9c2c80f2b2cbbf8a5387ee9fee1b3d2de34e2bdf9bb1cf5eacd1430e07cf846e5bced85f1e753aa1adf482ab6d9a2a76d4b0189da0f83c674e059d89983a73b5eceabadb3a5e1a7363fba23fcaf92b81247146aeeb6fd3f556c9238d63cc892218615cab9c2025f13e857c2bc4ae57860f74dc1c92d962fef1db3fb410b3d5762f199e03c303bba951a8bed31b069fc19749de4245b41a9783b9d4ca5d0e0debf13e0421a5ce91f60aa7f50e1787c642453cdb10c11e39aa4107db99815e196880729e7c0c3e781df9825fc7a6cef0912723774be637af1dba89bedb3673da8c4ac4a63151072a82103c3b912de1129e60418b8cda3c8c229e53f9b2c6b9826f9f166cec925dfb9722a8c3576162a9df9506a60be0844f2bee621e60cdcc3139d19bace03d0f32194b579b7a74b8531c46ab5249fb72d17085852bcda907057b5dc242d67d930afa3af0d824f7e0c775c11b8a3b5696fe328df3a17abaed68647aea87c08d3fbf1ffef7692a6b3a83a2e56161ce2127877006e3d88d1b125233b458a42fc30e7784b741b5564dae78e91e878e6a52dc4334993ef2549283955b2c80e05f83f185878916463eda0332d5ec6fa03d3a417d9417150bab5a7356b5130cc450f814ceac15dcc06c3e96e5a5a8931e1215b3841bee0b297683a80e8e664bd01b9e7fe729113b2ce18df5ee9ab17bdfeb5d9f200d842a51dd2fdb071cb133e2d244a663d468bc006315c6533d05c8fdc92d1e155db688d5aea53d586b0f79bdbdaef7e399594cc2777af9eb67e33372dd6e7322975f16630cb1a9cacaaf28943a8392078ade91e33fbfbe4c1e03bac816a9b3912704d02f2ed53ae05392520e4d07061151ba6c938f85e80b5ba296ca13825c8e6be141f560e9ef98ef61cdb080436bd92564998f3617458926e3cce65517ff65c59a54cb5d80b16455d3c962b8c86d5fa7204b6ecd5dce04c9cc732fdbbf67e6dc50bca3ba0b53255b47491b3c5d6042ac298330d049a1f95629ef19673a6b03b0875580e110041d04cf4696d115d15cd5c4acde347d136995d8448e25dfa9076ae2e5f579ff9be1efc8c5ad75a9ff853ab579387964f61d5c499c54b3f08e50a4dc345581b12f3ee3afa4350a07953c0ed016508c7d95ce7f8e76b593e54e05ae131827f8eb5576263c9974d3d498f532c39aa790fdc4482b0c3ab56e771fc4a5dec854a9e9421ddb2470976a74955ac3cfc82780918f1a856a485dfed15631ffb3d8cd614bc721ece74f86f722b475c9c38ccc6a194cfbef8ac407bbf314ec5eba9057384d2cb0e6f936a363e822f990b66279c33e483725715e04b94fc69fddaad1b619bac6227fa44443bd0b7e77e5c3b9d70ee48f060abca9074c53e4d0598ff2105be2abe5bdbece1eec65a1ffcaf24cabce8f88fd2b9c5da0e9ad6905f614b70160c8127e9616c8940227553090813924df7d3271de5f68f2d7c717839c4fb12482ecbbab30591ded5cc8bf2a7681aca0842896d4d776bdb65abd8bb11a19f98ce4247c2434cb6e824413dc7726e93009b5fed9e027a20c74c2630496796d2577d77bf4e0f96eef1cfa04eae42d61df3b6a703c7f23942239b47889f3015c9576ccbdf9ea19ff28060cf6b4ef8b4c7640485b373f15206a2ce56a554bd4d026897e13e407ab081efe8b5e0a9fbae09dd016e6993ac491c14d4d178949fe60e5915f0e2eabc9778f9b0bcf61185a3a3e514c01c621d5166e67c60989ea6927c92ac0fe8d6e77c87d294aa83d1a44e31f2827917bd2d97a952336ebcb778a1fcd9a3c7ff3eec0e4ff067956fd10363173845f3eccdc4b194694537a1e31814d24e32032929db9176aa5503809b3ad2f80817e34e611c51f627517b7dc7fa1cc028db09835aba41e1d8ecc2c1a6175c6e83dfa079e11fc79499451fd1b375a1cdeb9f941d0c9b8ed4f822ec1e83f4bf29
59edccd89513a85b3930d78659fc061b760fc89af202ec7d92c3159b488c327ad21e4628c796880b4cbeb4afc530293747f3b9dc16d6bae953e018e5f981f6f87c787325c0f03d5cd8d65ab12b06e834547c95904646f35392bb1e2624d09b6986632de18a40611d58e6c51f160cc23079fad1323a319249d659a8a8f53ade5e423440dfa6cf93bc0292c5f948bc4e54a4775545262cc001df90b59d56c3dfc9990c874a719602ee9a8563ff6dabb07f79d3f25514c709f5cf3afc1e1f4054b2433c7af38ad4a621ea70935185343af3fdc742a2a93c0933e5eeac9a79771ee77c10d49956137f1cef73c353200762cc858f61c763428b51d5bbbb72dabc88331be273bb3af1921e027493645848f0d17287b9868b9aa5873105bcc92dc116d124b89836e8696dc7e23d9bd0ade92cca481466096c134c712f8dfc7b2233c42ad2a6e058df90f782367910624d8f544e75f338c23557df2e2b792409f40124dceafb805d82808817eeda43bd57399af2d65242cb822932e17d27521417cad42735feae44248c3734182cfefd65433d47f524728159575182a5b7726ff5f7c62b5513e0e9f27048d5d6b108493c3dd138de89ee7207090efb9f567c7485104f969339ef5472652eef7230d39bc4b2bce65e522451da075b79b695f365f5d4652d7677dfffcf40e55b6653ab63131b74f78a441012856563708385e39457841f9b12787c2e8c14d6f5960c5c9f8bef8ed7651f0c9c97cc673c7306f52f9f2b40f9e855d51aa3dd91ccd8ac8383c44a813eac2d3cf3f0294a46b4a5ff42e70958afaae88e472934a7e6b74e5eb0f1c301afca66c4ecf46e65efa4229894570cb9ad3b42cccdd6ddb2185f03ff7d05fb1d9bebb396725f6ffe1bda03758c4a89b46c101d7a09620b347ea032d7184441021c1aeedbc41ade758a143012f77f5c3cb9bf6949337197b3295900699d76de2b1aa7eaa4272fe5c3db90f5d82d1ad7a6579c0c6a78b2692767e2657592814741c890646c2a8605af04481bd8c8d5a311ff1a9f31cb206dc45e0b7c8e3b2244280aa4cbbc052c9042c5ec1bf00127f2e40e04128f82a98854fef5e68550400703986f05ce90f6950aa62f922a7cdfdfa1d214e01297713eee52ac07577b63d91f3ec02bf87e56ac1076bb8547fcfb", @generic="4f80c693066f453aba77f81f494ad5de3f0bb0f189b5857ca173c1396c7af61d923d22ffde868c67643887f0dc47dd2b34f4c858687b65e31e1bcd2d3f1bcc415384e97edbcd36e05976d422767654fd817626e71daf7d640b9caef94c096bd4c3fb47ba551261ab0425935f87d3aab2acf82c7e8a50c6dea37e095e9f9aba1616274826a8b8fdb1b6d4b33066b4ca7daadfd8eda2e44aa58e347a8d5d556f76c73ea890b6bd39c83147183d0c13ea06238c218b2271f069167eeb8433e86f7d1d84b4ae7575651977f5642c3ef92bc398f284978cb883f455725a0a618f9e96d25f5a41f624857dd6032eef2f9bc600243931ed9ecdb7a4e79f6c4d4fc9", @generic="a02ac7bf3c0876ca66e95a176fdabb425f934cf86ae0c9c09d0c4fa369e6e856bb3a3c0660aa1b88fe124c28fb525b3c9edab9cb45c2ff6d5a68261c1d41c6810a0f6b35f9483d90a19e9177c05a796f9077daa82514eb8558b8bc2ccf26ffe610be501bc49535b36a4ca0bc4db8363b87e73b28ba0106ce9cb9ea6b4080e5f479c6025c3d6925e2a2fb0f5abc35a6c9ac9aa896a6d4d7b52c61feb91b21da9a25e93bf9d763ebd8654cbecf5f728965a303c36f2c", @generic="433e979948cdaa706dc09323d59b496a801c61625f031768c2ace4943850f60f4f227e04e551c7d0036b3600b31f6659ca04dc52a2582efb3f89e046a628ce98aecc0492409ae0a05604515a622bc32076db8e9178d4ab6171a556570202f592ba67104cfc7edecdd2e13cd44d68d6af45ee4c57a70155ed244b30d21006daaeb37bf12cc9cd663b4f354cf7b9976e06c5d5cc003b5b01f179896618467003c3d89e9a74a8beaafa35247e49ed60cf627e77020602ad650b96024705a04137a874a3b81ab26b4a735d86c6c2b522663e558d9632496cb5dabcd12e7ec1218840afcf3a2b27fd892ab32b7575cc862d4e8a4fd7740e18a351", @generic="ea763b6f8786", @typed={0x8, 0x76, 0x0, 0x0, @u32=0x5}, @typed={0x8, 0x22, 0x0, 0x0, @u32=0x3}, @generic="1a3fb0e9549d92ea07cac37f8933ce63fba6a8f00d1b4b59ad9ad3586232c1181f6da4d0409d7f6e5a15b0ecd8177c47be6dc1e226c34acc42410cfa0e7701f5f23015201bccbd1c79eaf6e7a60977c016d1526a6df45841bd480a4f4fdbf53dd4b447654255578ddddedca2aa8df2c9f29f6d3cd3"]}, @typed={0x14, 0x7, 0x0, 0x0, @ipv6=@mcast1}, @nested={0x202c, 0x4, 0x0, 0x1, [@typed={0xd, 0x69, 0x0, 0x0, @str='$]!:[\'+}\x00'}, 
@typed={0x1004, 0x26, 0x0, 0x0, @binary="10eec0ea280627c6ffb129df4047fb85a06c1aa28dd39b8ab80c8507467ba79a0dfeb30f1127ca80abefd9d4261be680716fbc6ce88613d7d7a6e82c760f06eb29eef3689f48874d0f96e1c382397732c33fc6d9597ffa7169a0332bc139a455a9fd208fe3e9e57f363113e79228c1a44a7ec27168406ce5cd679d53488620e427b72b2e5405fc5f9336c25d6e5c6d7b195eb4dcfab5541cb32cc80d75eb8c5a3f90193c886a2114be9a884de6a1f2332439133e27ace87e26268caae035ea27d36948a3bf973645727ef5e902a49d69b331824773994ca5164ff4d1bc7fb3872d31d4ebca576a500066655bbb25c36d42d8286c76a619af8b6a7cb0acec9481d34af71e9a0e0ba3f3bc5595737ce40f7501674950414d98cac28ac879b5efba2f9aa23e20d7c1fb591b8afd094d7d54001895b55716c80bb753637c8d41ce51cae5cd4332ab3c353e16fb75c3a85872a104abc4961e0aa8d54f46b1feae6075bceef2f24fce4c809eb82a82033547bdf8fbbe2c96ce5f20eebeb772805a64f6d9cf2b20c93e11453140f34119b4ca1a5ec954177838efb19e5baaa1196a7ae395580f9d3097483f237eef4896a9baef56e539094aa4b67bc1596ee1af18630b5392dde47ec52b56d83bd77982f7b231e6412a2bca83e10287b48aeff87aa5935be347a3633f56681e0046dc78c6f7739efe833e59bfafab0e4871b5aa971a0142f71f8eafcbfda78c69461f5b39df5e7372b733f2b3bd6922022f68bb0884b51f171ea992f3630bd2346fbd7e830453bb3928255b0901781cc68a5819753a6858215d68bd00c684d5ec0e244741aeddf9b433e4953e2ff1c5febc781f761b686061efd4cbbee40a5aff2f87f8f93301e2d79080c5eb59048d23bf70fdaad055bbab0acf53614e0b0fc792de8e66dad665963c13a7dd16e8eb1a6d69501fa0f4095132f460ff85fe47801e2ddb919d3e225c32681c2a08b1df57aab0813bfe63ed76751df32f23ecacd541010a71bcae8916034524ebabf9bd768f1fffa2dcd9e9116f7f30c7e275394911f94c65f5bff10fbf4159b85c0994e70ebbc704213704b73a0ca97ad36e46969558579dcb3f3a5635473b974924cd191327544561b2c7bc1577ca29f930e717eb320c597237e7a25e9142de98795ac7052c3d310b32849024f37cc55a01df8221457edce37c40818ed40ed7af52ee529dbb11dcc28127f1d2cef3a1e223ad1b9430f50386906ac8ab3f161856afcce4c7a93ce8b3639c8515b05786e6b21f1720c9bd7bffd16d506062e51c6c055f73441f205db8a5bc33da6fbfe31254ffe7903011a99141f8b516c9126aa75b4e8d311cdb3b06592c154e823768aba66909d9ade6c9f8f65eaba20beb534f12aa6448c15b6e67967db437cc843630dd3ab0b7a8d3c9bd3ba33e29e7b71f7881d1e3eb2b63c6488a1701adf0d9a1f78087832fa56482f4771d3ec185b15ff595c8b5774570baa81ed1173ab40d66ed225fa492951f067747f3f2e8e0faac2b80e47207e3acb2ecce1a2f46c2d19847b77aa0a37ef235a4a93008ce7db32c71684a5037ccdc99dd257360103d9f0c7e3cbb3571f9f4f03a36f3df24c582b10a0425dc834f3879b9a5a789713d32792329e33ba1452c413b6f9cc5aa71ba59ff0379a9703a3ef8e357f2cbb91e63aca042f26d988c60ce3ab259ef1770adefd5d852bc5fafa72d12f2a3e889e188f774015a7d142a150f3de931bc95146d906c38ca297ad758b71eb4a1697af66c3a5e5fd93c43d6273645cd2cbac8dfa99a1f1cf7a71a07c874bdc68dd63289e63d1ec001ef10c6bc01046be74a69c91ae153d6d73234b1e062e5e1ee925bb4b4d44ca26a09b681b62d683135d45d5f9fb860ab98eec49138fbe06059627218a7dd2264caf66b5349a271dededa71a07a2da2dc9e885c6b82146ca9616b1478f8879bb7fee0a88e69a7d8177ba9a84bd4e9304c56ce5cc63e55a302f30957efef7be1f21c110f8fa9abb1a148061ab31ccbf37ef0e967de773873c0ffc1b92736145ade859aeeee16217424d9809fc8f613fe8e949028a968234f62a32cbe0a118f1282f3e6ec54b32dbe7cba4e279ff9dfa641e622265112c972da6e466dec9a53428b50ebf2385357888c0502cc6012d93fbdd31e5c6334e0dc0100062987c3bcee1e0387d82cb159b5b67ba39f45f38e996c4e330beb02c35b318035cca031c688bde0b1df1a7ad959448c909e470703e7f706b2ca11f01ec4e6e56da1e195cf959a5c4f97985bf536839e824a2938205ef348b521cf1a922d89df8bca4a4172ae2a5fce2dc1ed778b89e722f0c5d8ee1c636c7625194911c6a92fb55db5b2c20b90f1e8647bcdb69d96f4dbea519c1afbc56424f4bc429579ccd08fc9ec4c64826d1f93675a131ce2a3c8ce3f6c54a87ca52ebc66cb87f347e884e9bd3fd57550bac306891d9aa35a4342c8f240bb1c94385f763624ce
6cd365062c8b5a191d088a7d876853924d380fa019b827459957bc35c025313036af7b58be212f0db31c322b6baabc316e9335f11776d776989540da63367f59c34c589e0584283c6890f4ea635d21fa3fac8ced154939309dff53e65b8838d2998efaae07870b816663c1344989f24835ebf0a0ae4c4b7cb4eb8d4276575f0c19bcc359d10b316757e060e2587bddd01ffd14d44854e52b9549855c94800e24d3587a1b43d7a49a2590876c23f8dc47b243fc7d8d14fc6e126da37fb5799c76ad17f6f966f4922ebe7fc5bcd0783e5eb37458a6412f4319ee4021f67bffea6e80c5430fdd4638f4ef5f7b86d74bd0735f0bc2912d34922806c1a68820cdc8733da54d4d99b7e1bb9d0eb1496e06dfd07f0e1b6c21e0a8250c9bf873e566df8b050d37d15941ef3ca078dc5623af3b53a55b88fcf88617430c41b1c2541413df81d13662af228866891a8827c6b7e9ac174007a7c6a57e07e3a9761611b470ea6d1a1bd35b02e48d73294f3ed994db905a2593fd7b689ae5c12437df90adbdd32437ebf7ca350f076ad2ef141aa308b9c1d03866f916ffbf00c42e93ee437da12c507e4aa0e986abb45229363caefbe87d18e50aba8bdfa8811c973a3634e9fa0b9828691e6faacc03accaf3174dd592ad31b57944902db28db2c77bdd9f92d08c3d53e3718c70b5c718a0f0243ced61fa09ad6500a21e261152ccf2acf195e13a093b78efe2e7543496b28eed9b9b7e10d49a2937eb53fcb66373d53e2b4875494bdfaa350d2c0ca87aba8cdb3f7afc6da9d60610c69597b946a799bdef362f9a8ee071ba375c8c93c605b84b71aeb639c513ff55efdea4fbc179ff5dea85567f7d46333320be9c0f6d27fe7e6547be227cba358b83487ee42f5e10640ac4123fd67d511eab764d01f34fa28a4dde2c665a28036ea1aca79cabac524b99fccb051a6f3eeb21536850f4ae91f10f3ebd4eb83e9494da1924ab073985bf7480947677c6ed92c60d450c4351e1c3287d1fe5fc39d7b569eb5eb890ef414b765aee4f718b878475f88592ed9520f6bb70b2cea1119c3856b806ff4d7434713360c0ba7425f13da43d43b798bfefeb883947243686c98deb50f9c2d71667088912499507cd011ec91a127c7c1f434e7d89fce9a2f220d979271b4cc971368bc5aefac1b1b8b34c48ba85f634cc7ee48d63963b5d200da64d60624e394f258514e4ec5c8712adb637aa7bb3ad0be8053dbb05649bea6714db46cb3807e10c655ef14ccb5a4d14767abfe9f6c18682bf9f5d6a6ec8d375d836ee828fb3b6a3b79d2d60fd9dbdd826f60835c225d272093546140c0dc135e369293612c70bc04661f9abd7698b999779688d4dfa4612199b05dbba097542552dabcf694322e18e81fba156d1f4030dc5715530a7a509e454301cbe87d7859d8435f849c1818363745c9c034cebb285be026c582884b0e36c1bddd99f7fe07c1c4dbf604fd3f624d48345e04d9fa53837b854a68dcb2db5dd1cbf7a4b8011924c7ca7c444f7fce3e04d16393d8563ad4a2fef34ecba95727927a9046f17b8566b6fcbd8c32779707a6f9d9f76d50fef8da194f800c73cc12092f5c0f7b14a980a3361de26834a134eb2055043461f7441e8ee45f394bd4be05a5ff5a2605dda0c11782f9d5a4372f6cff7f84e3a8dad34c28148946b021466546725119c12c7049b03f73c24305d8f8bc5925240f456a20538a439c68a258d1f1c27929f161ddff74cbdac0aaa1cdeae45b2385b93edcb7aadc2b9e5ac2f6dad62fc7ed562a4b96d0e01ddf9f1a3e11f36362ec5c3a7f0ec85edf7527293a231aa5441e1808c9fe2ad7492ae432564019255a08c12a68a615a02b130971ea1f369421925550f239570e595ce16a40fcc6b3006d205868ad10e955a64eeb854931cb74bd9bdb965502e3ca9212bc9d339e11e2960545d94af276497b21202a9c194d3263f62fb4087646b0f68cfc6d36928fd8df8d014b21cb0defe6ad1c79d4a9ba7426420999b5c35b8b6292082fada1c10ae55ada6959208adfc1c3fd1d58c0a54185be1da0d9610ba98baf016f952b0ee38be534972804eb003252a6d1894f0617cb955d09c6d3d4086452be816daeb7775a61690dcc8ee0e432fca4a7a481c0d85ad34cadb4e0960795c9d4a5ccdabffb957fc59acef2f34d4500326e9f52b43f863a92cd027fe4df8b899d48c4d0914e0db78f9f42d30a7bba5c7de2a3fbb9bcae2e23f5159cb9d2593bf181a434a3b361d321a8079c6178b9fb26ad42b8bf2088b708abc831acf16ddc86214f09668a3fd5f7cbbd1a957c05a0497e4b113809e09dd70ab530f76378dd7e785eea1e172cc789ab3942df1373d1dade393e4747e9f8a1c905ae1f4d5fa4eb215b484d82ca5116bc88a9122724c9c99aaa3df8c2767ab1816606bcbd4758cb940722c812a274a6e7ca18f1182d57b11a9ef12a8a083575d7dd45f75334e3978201de534a79c2e96b88ab330d443910b4376d08dd
7d38122a255f88b6887a1c01a71406a47135b688d5c979ecc7b71039ec3f3bc9ccc1e00f4b0e76c5805ad93b2af887e84edb94b3209cb381382de26f3a9759cb3f7f857f141eaddb266c2a58c6ff2f3f65ecfaeb9ff1b8483d1c22cc67d3fc91199c1ec878f7aa3117c42938a0b62b9b8cb3360764170122288706a94bf45d849248aff5e69463c05037f68987606b30db0af67d20556c1a7ceac79daee31bb07c7db9b2f2379c3c04de12d794ec3ea47832615ee9fef3b58c4b3a245316ccfb2ea5f76bcf6c73a439d156fd38bc108ec7cc53fc83dc5037ee777367ad54553c18c228af6e10d9f79519b4f7ad2eca0a070980a7a2db25de00516d6c3bbc52898c835c9f3ee42695af5c68be660867ee3857a16b5aa02f5a7a759764d24c658c528ee68ff9d172f58bc926b554fb22c7bef1691814361eb918373c628a3100437d9be56b97049527d3f6bf500df20ea42c988d420df4f2e0c7cb03ceda87d78a12a28aafcc36095e1201c1efa717cf8200df26edcb6c34ff315a32b89b64e5b1d61f80388758f68cabb5dfdb89dd426b3c11d7f2d8f1101e1cde2aaa85412d195efef01c09da37b5466e7c7bc242967d839e15c15f63b046273b8b6590541cc76b70ee298086e06192126c893dc9a0fcd12661b48654e88944735fa21e60b5f12caec7093d7b4fd9155cefd251cb7b81f3926d61f23d344dc97ff7ee25f466a402b8d28655b2288582eb72c8b4794f1ceaa33fd548410b5b6f68e702fc5ecaee68e9d90772076d76a34609ae87aa6f07f10"}, @typed={0x1004, 0x4, 0x0, 0x0, @binary="4c9365e1b99539cafc93e40dd7b8a0656f77e06ca01e950e4aae6bd06df7bdc3ec2a2a196ab8b5ad3dcaae2d13c58cfc93b60793612c105223bbe2ed2d18c43e0ee71fc3231c54a1a855da1134d249d7ad2a0c54ada2eba9e151a62d3a871dab190fc0186cc367a4726f1a9a470930649ea901dc22264d1793ad45fced3a009a2569eb90994d81a40743c1bcbefa98a9188524091b918f898cba87d9b4e4345712c5848ea2538c14b0ce7028991910c231bf76e9995bcb3ecea9eb56d7be7ffbd556fa99e79392bdc579cda64a8dee66417bc754977a4c0bd1fc5df71747db2d21eff41cbe5352cb6661fbf1ced92aabce65ce3c23f4bee26adb6f2ecd687ca29b8d4d172e28eba023490e460898c447ddc6c1173e999a4fa1bf4fa9357080d62f626523d6fa135c9a310ff24f507c37daa2d42c0e125f0e248aef7486d8ee7b6593541ec2bae9a3bd790b9419a808dad08f2b80aeb49fc768d0f9eb166cac072d669a56a62be1569ee230c6187d51a5803aee1046794018e060189813e934590f9a6d6271034769358d1ec1f40d48f309011faedf8d6224cf90f91017db5c91ada0fab472e64cec8744585cff67a651d64054959084206ebe582283f92e0900c4f25d1964edc2b3058bce15bdb0f76158eb4c359d49064b741e7e0f72c044fc49d984365bb09c73d365d8f806e45a4248695543a4529b88c587470750e0a3171c613d823ab44a8ddf8ff826e6eda1a378bab1205c0bc0ea1f06a854b048c451704222e3a097c8517c2c07e39b031bb4d484e675d88cf1a1cdc9f6fba035de1ac851c24a047946c0a80d04966406e886e66f2203b8c78fb8de17e103e7c97b546e3570e415e87f9b5b4cddd0a3b7d21b7e0ed23b743c392c0509b39dbed6413ecfaa5358c8fc267de38e8e2b5d9fe5c80f162ece8f82566b24187b4bdab94c56db28c542cb342f0b7aaaa22ee360b209bcbaf4a8565d8f2e3d2d7293e41c13bf619658c159f82893ded9f36a8c2a93a7a9c5ce7f40b20455604b87bc50be4b1dca437be6831fee0d8e35da3c090b6a5d816ee29fc7ea3a282d5b3e8ddc3a3b6c07cdf8f79630664b5736afe12161a3d5d6924bd7de05f21937cbe5d0e466e977fa725398b3efc88564ecacf2e29eaf05cd3897b95eeca39a4ed9231e2b77313ca3c9a0a5c82b575703c8bac5d4cbe1ad80968ee63fc35bb19c183bd1fee1a883143387512ca367b301ec5ee99bac11591b2f4daf7eb038205baaf61341248d017ee332a9368898d45d14fb02ed3a0e1be0a4d7616b280b4b08d30314034c1b4a5b6335f189792514e430ad94f4e3941ce507a874f76afc97db7395b89a8f67e07057109ad338b42d116ef48a1035c193fa4fa79e3a991008f3f6710753c083a24116284083881b3ae37e0d7cca98b2541feaf25b74e643be9e8846f218659c17fd7b22558614d6c16c86b73df4828c11ac5c62887bb79f25a479b9a90f2a6167c734acb0c4a0b9f8a405e1a380dcc8416246a8f1e2ba3a3da4e2fb1fb3279af8de526ff269fa5f77ff27d22a0285b979a6f90c45f864b31cdf4bd00ba577e2fe0fbe18c89b89503b40943e22bfa085213ec554e35cec8e87b9a9410e315ffd819b3d13ed474601b44d628d91175cd9618665580a35272a659d540f9c38cc55d66ca3efdbdbb2e9b4580c
6a671aecd202ba7e66edb81931042cc533955e7037035256ebbe5c760657e4b944396f6cabe6ef439285d70d65ba36c40bb2605174913e4da8709b6117f4978221cea7398892b870e5b7c3daf340535ecae73617ee8d7ce42dbd90a3f1be6840b62952cb6e58d1f22479b2154213efde199b5a0742eda8051dbb81a34b7dfe98e445d3a2d10a2d96b3ed62c9f619432bc13161cbf99384de2d2b10d0c8c8737f1f052a1d841b67c1f136dca3228dd23eb4ae653df2c1469ef0a3755eb00a538e2cf16c484cb9915a95f7ce8ea2870726642289a8187f93915e08d22572f373659c23ade065cbba32f771daeae1508e7180b90848997d2a260a21368d7f2d2e5819e4671a7ad66dfb892e9f376386cc94de6849c36fe63641d0939120cc32f18aafce416fc95333a0066a6e94976201dacd855efc6e29c921ecc90d6fe1c3cf32c6f9ed6f6c543616af1398a6ac1fc79a96241ac284e4f6ee616f30e8eed87000f3bfcbca33d495255925288c3bcd31fef951b9baacdeafe18df7b267425481e3692ad11c9b3c3d50a3585591ac3f736f9f38065645132155d637757d04da9850a88cc91420197e773aa111548f6750b971386ac73481978916784bb2658f148a7ca831f24f9f1da9276c5a158454840c373c1bceb013ed3d0a5a978786478de8c9e5ff51e97bc63c8341c30c6217d56f844f4a48d9431d2925606b3a61e077609c823ec1c13f76366b0285eaa95e79ff63bf5b4a18899486c325b8ef72a31a0ab69d09cee4adac8a242c7f20a785dc3299e12e3ef16591468e4db17049e5c48f6be4e2375ef48f222b21f037d57f18dfceffc64245d9f825f18ac5825a042c3102b110ecc43e625cdb198861ec1a9da495512f03ce862b80e4b59b2fe32086763868be64a09b2142676405182179188d95ca7b45add98ae5c03fb96c8643c1943e46718fe9fd83060c991168be57386338dea3b8a29dfeab35ca450312f7a807e2c303d91f0674bb03c98d8ec6546d2e25d6b073e0f7a43b65a80fa59282ec69520bdeedd8ddaaa12a6c248ea0ca724f1a2bb1f9e56e59f36794128bd5d596f575af6ccf6340050f6001048dc8341f2b0f01c0308308de66d9e16136f4fdeda5f1d008a803a9e32a7d92b35b777aca7f2d4046214e783f0a92f752f2131beb785646c19309ac9888b7683892c0d7564548742c7cb4881b994462ae706503d732c335194019b3d4815490c1290826990a9a4b7580d3fa8032f06d3a5494dea54d73d6c1f956012631eb524a3391e4fa54fc96652c8e6abc0e0b75d3e6dbf0abde878339e79ae4ef39f959e5658c4dde58b95908f673dfebe5763d8200402f582c2203a9d68c37c257d05d5781d044f6a1589b7eb0b7cfe801b3b531d0e24838d758f7f34baf1d474893ba3abce406f28ca159612400fa3380d99fea2968391b1adbe43fd11263e5949579e9a1f3268f072ddf76a151fbca1c87abb46a1ddcbe9bd440f40fd0a0b206d334f6e68a790924e10c9eb8bcf1e7ee8415193aa2046293652c91309c14b4c34c5504b509e98b70c7a18e9bd727d5bde923a39a733838f2c88636eebb2bbe879cca4c58cede033bf11f7f31caad3998a70d8034cd233150a8a41a5a2e7f84a55f6f8de1da70103e2ea115b35ee848b47f9cc3931855736638decec3211d8ce82ffcaca966f3d07ade4ec67fc19209e1c4bedb406ab87fe009ca46cd35d63cc7fefe9e6f1bded3ed7d5206775db8b8a60d6c8c3aa157ba2e0a17001e4bd9124f65b8cd5249fad6cea1d33da124390307874c27c0ffa69b30d213e84b2284013f1aea08a980a19c53f97e9ce16e100e1b5fda7cfb999b2e71cb31b896a7c5b7cee6143a7bbe8a0be47284b3d40774dc17afe3da896977e25b30c0fa77966692115ba8a3ba0415547dbe1625769ef6071c575ee1da8ba0de001f3470f2dcdd3636bfeeb16bce926347f488b7b9f79a919868fe004155bb84f40cfac783daaed7b62a5a68f8ae97a35f90eec668d36e8a179cb262a1fcc6e86785f0351f5ff671ff1db82e846b6f9abbdb655a824c3cb7d04ade6c12ac3d2593b0f78fdf3296145e7286e8e46603c31a2b7f4b19c7b5092c353953324dc02b47c33f4c5d7984e9a3f957a01e1f9cabb6c470438e8807e71dc4399fa943f5d95e51dabbd7851473fc616db2b40eddfe5489db095c2b0d69777d9ceb49d9d9cdbe57a25529193168df79b65c4d3f8f3c860d49e170658fc66b8317e51cc4f76163db65cf63f24ebc6fd2ea9fbe8f5b0384b591e4fb72c9e76bb146d10cf31f302f61fee838d9848b9d9640ffd4001aa70bdf96d5c1eea7fabde27358ce478d2d3887e998875aa9a065cc5fe48aba5fe06501abb28374c19c5ccb44940afd112a088c211e637d8731d66ca0676c8a8836b008148b0d4635b35a23c052a2bda3626342115316ad0ac3539a96373bf1b07917b22f28598c6b2ebb6bf3bbb50898536bbf46d97a984c5a689cc21a2ea35eff5ae8d
9f3aa45c139693fbdebb9739203e394923365ab15f0171a71e2059d1470b7ac175a7c698ef538bb3cad1c6c8ebd081fcfa83e3267d568761c0c0627adc64b115e19b2e1d1d8754c05f0fda4fabd479d0e672c4977df2b24516a2f179ac503371806f49df55644bcf9aa9e652a8cd6979ddd067f73090156baef52ad8f7329adfde05748e89fea77133c98702e8c733d982c7d86a575e0d21cef175853719fbc3bbd9ffb36c15048a6e84d029dbb1026fb2349e907d3f8056fff991f518ee91fb198bd5a85a489344e7bf20c698856e4790768ea7c6c7348d42c213421eb57ebf9ca4ab03fa32c529bd78cf775a4851179b3030764225915d3eb6a4cabf7e35a85aeace82263f71aff6417f48bc5844a1ef853a1b6c156216e4257280e4ede49ef1580a262e63af587848f43c3a720ae3bf68c127ded68bea321142030a9e9876be970f43b940aa1c94f59c653f48a8b5dbc9337cb74d1cdec240f9a4fb082b7e19cf0343195a364c3fbae25f0f816ddf2f08b0911f2b7b16faeb97c637f15b2a1f90814e9ea869e4103c92b7b4ecbcb0326f989b1c818a1d4607c28adb599bc1e08920c4c11f9f8b739b30ebab64d048c9fb0eeb8a6933ae76577a9062170a20af69512d173853ed77bf2b95081f0e808ea7241e5223aca34237b0cb47568ecba3af50fe53a4ee6a1a274ae62597240838125f451a56f8b372692ecd7dba531c4fd28c5a4c1ed9eaadee87b77b4830579e82c341af9d098826bef9f2b3cce99d62b3e885511e4177411fffa8b7191f09c06c38b66b681d12dc2753fbdda5a50d79483f84139585b795829ac749b682f61e582bc4fa431cdbcf7f2b7c6aedd637e0802547035b48fb9a597a29d334515eb4748062449b01546c998c295a8a52e376f591afe49adf45e8b1a473a6b9fe55be02c70ea5161a2ab472b76f2c6602ae5b508ca6126141e92d10476e5f453a8244b520aeb78354a62e7504ed3bdee660b813ca644fd638de486caa135ddb90d7f1176081bd2ca3f2e2929822d792eedc860335be1d09a4e07cd93aa9827b2001938c19b84c50d6bd6b8fbc71f0216aefe227d5bb5a1ed167485282fa7e801a1540de72f989cc65d9d44fe3a0a61d3359e3642b1d9a42d1e1375785828812147a03e16bbf5c85ab943954520e5e25d8abbe7b6032e92091ca8b9b9ebe343ecd403da9ce81b9b4f2c47e3a5d4a458f6edde0ecea41f46bf3032d3ec7e7d4af8fb5cd1a9b916c65723d110cea6f17c7c352a6be9abec5f3f3799d50d5b69b556e024c482d7076708501672ac4bf024f30acfcbd46804f5f32af2040e7f6a7d52bde679edd33f33f30042a16c542768ac3545158084a08fddf846100c4047ea9be156968a4e98fcafd23b9e032e204b905b0788265ff3a2710b42f755a4d1f8453214c2e9e423f9716eae83f5406032e4d008ab440eac7ebec5a489c6b67d6d07a24dae11360248dcd47beb2e2f51178a0c98e920b8a31edb9dd890fd4ebbfa49dc1f117828d3146f62428e812dcd906edc82964efbf644aefb5bb743a7795822e10be0db08a89e1e41423ba591c68974b0d12b9f40a9"}, @typed={0x8, 0x13, 0x0, 0x0, @fd=r4}, @typed={0x8, 0x68, 0x0, 0x0, @pid}]}]}, 0x3650}], 0x5, &(0x7f0000007840)=[@rights={{0x20, 0x1, 0x1, [0xffffffffffffffff, 0xffffffffffffffff, r4, 0xffffffffffffffff]}}, @rights={{0x28, 0x1, 0x1, [r8, r9, 0xffffffffffffffff, r0, r5, r3]}}], 0x48, 0x8080}, 0x2000880d) [ 1985.538752][T28562] bond977: entered promiscuous mode [ 1985.550325][T28562] 8021q: adding VLAN 0 to HW filter on device bond977 01:55:35 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r1, 0x0) (async) accept4(r1, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r1, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) (async) setsockopt$inet6_tcp_int(r1, 0x6, 0x19, &(0x7f0000000280)=0x3f, 0x4) r2 = socket$inet6(0xa, 0x3, 0x20) sendto$inet6(r2, &(0x7f00000001c0)="73fe3a96339fd1ab0b0212f99a46a20dc4e309aa2fec8573556b94f492bac75bed55bfaca36cf0fbe00c0a83da6ff91584ed0e5a9d093a566741a5ecdb51759c4ddf43b0f27ecedd7ee398e55de82ed24c8ea6735b2ae9dccb8fa82a088e74", 0x5f, 0x50, 0x0, 0x0) accept4(r0, 0x0, 0x0, 0x0) (async) r3 = socket$netlink(0x10, 0x3, 0x0) (async) r4 = socket$netlink(0x10, 0x3, 0x0) 
(async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(0xffffffffffffffff, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(0xffffffffffffffff, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r4, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r3, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) pipe(&(0x7f0000000100)={0xffffffffffffffff}) ioctl$BTRFS_IOC_TREE_SEARCH_V2(r4, 0xc0709411, &(0x7f0000000140)={{0x0, 0x7ff, 0x7, 0x8, 0x40000000, 0xffff, 0x7, 0x0, 0x4, 0x1, 0xffffffff, 0x9, 0x8000, 0x7, 0x1}, 0x8, [0x0]}) socket$inet6_sctp(0xa, 0x5, 0x84) (async) ioctl$BTRFS_IOC_INO_LOOKUP(r6, 0xd0009412, &(0x7f00000006c0)={r7, 0x4}) 01:55:35 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xea01, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1985.626625][T28566] bond977: (slave bridge935): making interface the new active one [ 1985.634663][T28566] bridge935: entered promiscuous mode [ 1985.643965][T28566] bond977: (slave bridge935): Enslaving as an active interface with an up link [ 1985.800848][T28578] bond925: entered promiscuous mode [ 1985.809646][T28578] 8021q: adding VLAN 0 to HW filter on device bond925 01:55:35 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1ac, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1985.920905][T28580] bond925: (slave bridge892): making interface the new active one [ 1985.929142][T28580] bridge892: entered 
promiscuous mode [ 1985.943302][T28580] bond925: (slave bridge892): Enslaving as an active interface with an up link [ 1985.962532][T28582] bond365 (uninitialized): Released all slaves 01:55:35 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, 0x0, &(0x7f0000000080)) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8}]}, 0x4c}}, 0x0) [ 1986.110778][T28585] bond1030: entered promiscuous mode [ 1986.125137][T28585] 8021q: adding VLAN 0 to HW filter on device bond1030 01:55:35 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) r2 = socket$igmp6(0xa, 0x3, 0x2) write$binfmt_elf32(r2, &(0x7f0000000400)={{0x7f, 0x45, 0x4c, 0x46, 0x9, 0x3, 0x8c, 0x81, 0x80000000, 0x2, 0x3, 0x80000000, 0x14f, 0x38, 0x156, 0x3d, 0x924, 0x20, 0x1, 0x8001, 0x1, 0x8e}, [{0x3, 0x2, 0x4, 0xcf16, 0x9, 0x20, 0x80, 0xfffffffe}, {0x60000000, 0x0, 0x48c00, 0x34, 0x7, 0x9, 0x1, 0x41}], "a65653afb4af18555f4538ab73cd1429bc461f4a7576432212c656c029635a1a1a0ebe688057049b52845553c3fcd840f95a1f047e9932032038c834c7d77ad716285ec605d7c9b6f7aa7b1862c0183d0e7775a8a36c8d308b43d1c580e0580014111223ae8a7b4bd08355faffab0c8f5115e5ea422d749a130e8f7a8924d53d6acc775cc91be14a207322f6a0c3106ec457", ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00']}, 0x80a) setsockopt$inet6_MCAST_MSFILTER(r1, 0x29, 0x30, &(0x7f0000000140)={0x3f, {{0xa, 0x4e24, 0x79, @local, 0x2}}, 0x0, 0x2, [{{0xa, 0x4e23, 0x4, @private0, 0x5}}, {{0xa, 0x4e21, 0x20, @mcast1, 0x6}}]}, 0x190) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000100), 0x4) r3 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r3, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) r4 = accept4$inet6(0xffffffffffffffff, &(0x7f0000000300)={0xa, 0x0, 0x0, @private1}, &(0x7f0000000340)=0x1c, 0x80800) sendto$inet6(r4, &(0x7f0000000380)="dd83285ca82817604da5e8d1a38fa2f0407cb04c8256ebff1ce7111cc363ec", 0x1f, 0x24000000, &(0x7f0000000c40)={0xa, 0x4e24, 0x5, @remote, 0x9d72}, 0x1c) sendto$inet6(r3, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) splice(r3, &(0x7f0000000000)=0x1ff, r0, &(0x7f0000000040)=0x5, 0xf99c, 0x8) [ 1986.202556][T28588] bond1030: (slave bridge994): making interface the new active one [ 1986.216976][T28588] bridge994: entered promiscuous mode [ 1986.228206][T28588] bond1030: (slave bridge994): Enslaving as an active interface with an up link 01:55:35 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, 
@broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x2400, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1986.289412][T28600] bond978: entered promiscuous mode [ 1986.296828][T28600] 8021q: adding VLAN 0 to HW filter on device bond978 01:55:36 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000000)=0x7) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r3, @ANYBLOB="af12dad23be3de2d984991cb9e235c1fbd7a43c581f33235758e8f4c0599a8bbc9ac6a6b03fb57aa1f8bd3e252098207d23060edb801ccc5029e945259930937e24efc5aa05a63a98b751f9691bcb88cb7c289a825eefb2dcb4450f51937d954a32deba56c021f71a249526ab06b17f04e86733cf45ec80e1d31de672458d58d109440bfc5c913924480af8652da7ccfe2c4e1b3be1b"], 0x4c}}, 0x0) 01:55:36 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xea03, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1986.480449][T28607] bond978: (slave bridge936): making interface the new active one [ 1986.490317][T28607] bridge936: entered promiscuous mode [ 1986.506902][T28607] bond978: (slave bridge936): Enslaving as an active interface with an up link [ 1986.580001][T28612] bond926: entered promiscuous mode [ 1986.593130][T28612] 8021q: adding VLAN 0 to HW filter on device bond926 [ 1986.631339][T28614] bond926: (slave bridge893): making interface the new active one [ 1986.640846][T28614] bridge893: entered promiscuous mode [ 1986.659546][T28614] bond926: (slave bridge893): Enslaving as an active interface with an up link [ 1986.682318][T28617] bond365 (uninitialized): Released all slaves 01:55:36 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = 
socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1ba, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:36 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1986.867068][T28623] bond1031: entered promiscuous mode [ 1986.873353][T28623] 8021q: adding VLAN 0 to HW filter on device bond1031 01:55:36 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3201, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1986.963675][T28625] bond1031: (slave bridge995): making interface the new active one [ 1986.971889][T28625] bridge995: entered promiscuous mode [ 1986.981945][T28625] bond1031: (slave bridge995): Enslaving as an active interface with an up link [ 1987.114260][T28633] bond979: entered promiscuous mode [ 1987.119922][T28633] 8021q: adding VLAN 0 to HW filter on device bond979 01:55:36 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) (async) r2 = socket$igmp6(0xa, 0x3, 0x2) write$binfmt_elf32(r2, &(0x7f0000000400)={{0x7f, 0x45, 0x4c, 0x46, 0x9, 0x3, 0x8c, 0x81, 0x80000000, 0x2, 0x3, 0x80000000, 0x14f, 0x38, 0x156, 0x3d, 0x924, 0x20, 0x1, 0x8001, 0x1, 0x8e}, [{0x3, 0x2, 0x4, 0xcf16, 0x9, 0x20, 0x80, 0xfffffffe}, {0x60000000, 0x0, 0x48c00, 0x34, 0x7, 0x9, 0x1, 0x41}], "a65653afb4af18555f4538ab73cd1429bc461f4a7576432212c656c029635a1a1a0ebe688057049b52845553c3fcd840f95a1f047e9932032038c834c7d77ad716285ec605d7c9b6f7aa7b1862c0183d0e7775a8a36c8d308b43d1c580e0580014111223ae8a7b4bd08355faffab0c8f5115e5ea422d749a130e8f7a8924d53d6acc775cc91be14a207322f6a0c3106ec457", ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00']}, 0x80a) (async) setsockopt$inet6_MCAST_MSFILTER(r1, 0x29, 0x30, &(0x7f0000000140)={0x3f, {{0xa, 0x4e24, 0x79, @local, 0x2}}, 0x0, 0x2, [{{0xa, 0x4e23, 0x4, @private0, 0x5}}, {{0xa, 0x4e21, 0x20, @mcast1, 0x6}}]}, 0x190) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000100), 0x4) (async) r3 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r3, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) (async) r4 = accept4$inet6(0xffffffffffffffff, &(0x7f0000000300)={0xa, 0x0, 0x0, @private1}, &(0x7f0000000340)=0x1c, 0x80800) sendto$inet6(r4, &(0x7f0000000380)="dd83285ca82817604da5e8d1a38fa2f0407cb04c8256ebff1ce7111cc363ec", 0x1f, 0x24000000, &(0x7f0000000c40)={0xa, 0x4e24, 0x5, @remote, 0x9d72}, 0x1c) (async) sendto$inet6(r3, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) splice(r3, &(0x7f0000000000)=0x1ff, r0, &(0x7f0000000040)=0x5, 0xf99c, 0x8) [ 1987.257555][T28635] bond979: (slave bridge937): making interface the new active one [ 1987.267210][T28635] bridge937: entered promiscuous mode [ 1987.281662][T28635] bond979: (slave bridge937): Enslaving as an active interface with an up link 01:55:36 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000000)=0x7) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r3, @ANYBLOB="af12dad23be3de2d984991cb9e235c1fbd7a43c581f33235758e8f4c0599a8bbc9ac6a6b03fb57aa1f8bd3e252098207d23060edb801ccc5029e945259930937e24efc5aa05a63a98b751f9691bcb88cb7c289a825eefb2dcb4450f51937d954a32deba56c021f71a249526ab06b17f04e86733cf45ec80e1d31de672458d58d109440bfc5c913924480af8652da7ccfe2c4e1b3be1b"], 0x4c}}, 0x0) socket$inet6_tcp(0xa, 0x1, 0x0) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000000)=0x7) (async) sendmsg$nl_route(r1, 
&(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r3, @ANYBLOB="af12dad23be3de2d984991cb9e235c1fbd7a43c581f33235758e8f4c0599a8bbc9ac6a6b03fb57aa1f8bd3e252098207d23060edb801ccc5029e945259930937e24efc5aa05a63a98b751f9691bcb88cb7c289a825eefb2dcb4450f51937d954a32deba56c021f71a249526ab06b17f04e86733cf45ec80e1d31de672458d58d109440bfc5c913924480af8652da7ccfe2c4e1b3be1b"], 0x4c}}, 0x0) (async) 01:55:37 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xf000, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1987.410665][T28641] bond927: entered promiscuous mode [ 1987.424390][T28641] 8021q: adding VLAN 0 to HW filter on device bond927 [ 1987.448050][T28644] bond365 (uninitialized): Released all slaves [ 1987.562171][T28645] bond927: (slave bridge894): making interface the new active one [ 1987.570467][T28645] bridge894: entered promiscuous mode [ 1987.584286][T28645] bond927: (slave bridge894): Enslaving as an active interface with an up link 01:55:37 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:37 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, 
&(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1e2, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1987.749225][T28651] bond1032: entered promiscuous mode [ 1987.755776][T28651] 8021q: adding VLAN 0 to HW filter on device bond1032 01:55:37 executing program 5: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) socket$inet6_tcp(0xa, 0x1, 0x0) (async) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r1, 0x0) socket$igmp6(0xa, 0x3, 0x2) (async) r2 = socket$igmp6(0xa, 0x3, 0x2) write$binfmt_elf32(r2, &(0x7f0000000400)={{0x7f, 0x45, 0x4c, 0x46, 0x9, 0x3, 0x8c, 0x81, 0x80000000, 0x2, 0x3, 0x80000000, 0x14f, 0x38, 0x156, 0x3d, 0x924, 0x20, 0x1, 0x8001, 0x1, 0x8e}, [{0x3, 0x2, 0x4, 0xcf16, 0x9, 0x20, 0x80, 0xfffffffe}, {0x60000000, 0x0, 0x48c00, 0x34, 0x7, 0x9, 0x1, 0x41}], "a65653afb4af18555f4538ab73cd1429bc461f4a7576432212c656c029635a1a1a0ebe688057049b52845553c3fcd840f95a1f047e9932032038c834c7d77ad716285ec605d7c9b6f7aa7b1862c0183d0e7775a8a36c8d308b43d1c580e0580014111223ae8a7b4bd08355faffab0c8f5115e5ea422d749a130e8f7a8924d53d6acc775cc91be14a207322f6a0c3106ec457", ['\x00', '\x00', '\x00', '\x00', '\x00', '\x00', '\x00']}, 0x80a) setsockopt$inet6_MCAST_MSFILTER(r1, 0x29, 0x30, &(0x7f0000000140)={0x3f, {{0xa, 0x4e24, 0x79, @local, 0x2}}, 0x0, 0x2, [{{0xa, 0x4e23, 0x4, @private0, 0x5}}, {{0xa, 0x4e21, 0x20, @mcast1, 0x6}}]}, 0x190) setsockopt$inet6_tcp_TCP_ULP(r1, 0x6, 0x1f, &(0x7f0000000100), 0x4) r3 = accept4(r1, 0x0, 0x0, 0x0) connect$unix(r3, &(0x7f0000000080)=@file={0x0, './file0\x00'}, 0x6e) r4 = accept4$inet6(0xffffffffffffffff, &(0x7f0000000300)={0xa, 0x0, 0x0, @private1}, &(0x7f0000000340)=0x1c, 0x80800) sendto$inet6(r4, &(0x7f0000000380)="dd83285ca82817604da5e8d1a38fa2f0407cb04c8256ebff1ce7111cc363ec", 0x1f, 0x24000000, &(0x7f0000000c40)={0xa, 0x4e24, 0x5, @remote, 0x9d72}, 0x1c) (async) sendto$inet6(r4, &(0x7f0000000380)="dd83285ca82817604da5e8d1a38fa2f0407cb04c8256ebff1ce7111cc363ec", 0x1f, 0x24000000, &(0x7f0000000c40)={0xa, 0x4e24, 0x5, @remote, 0x9d72}, 0x1c) sendto$inet6(r3, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) (async) sendto$inet6(r3, &(0x7f00000000c0), 0xfffffdda, 0x0, 0x0, 0x600000000000004) splice(r3, &(0x7f0000000000)=0x1ff, r0, &(0x7f0000000040)=0x5, 0xf99c, 0x8) [ 1987.806945][T28652] bond1032: (slave bridge996): making interface the new active one [ 1987.815880][T28652] bridge996: entered promiscuous mode [ 1987.830140][T28652] bond1032: (slave bridge996): Enslaving as an active interface with an up link 01:55:37 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, 
&(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3c00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1987.949963][T28664] bond980: entered promiscuous mode [ 1987.964754][T28664] 8021q: adding VLAN 0 to HW filter on device bond980 01:55:37 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000000)=0x7) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000100)=ANY=[@ANYBLOB="4c00000010001fff00"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00", @ANYRES32=r3, @ANYBLOB="af12dad23be3de2d984991cb9e235c1fbd7a43c581f33235758e8f4c0599a8bbc9ac6a6b03fb57aa1f8bd3e252098207d23060edb801ccc5029e945259930937e24efc5aa05a63a98b751f9691bcb88cb7c289a825eefb2dcb4450f51937d954a32deba56c021f71a249526ab06b17f04e86733cf45ec80e1d31de672458d58d109440bfc5c913924480af8652da7ccfe2c4e1b3be1b"], 0x4c}}, 0x0) [ 1988.143533][T28668] bond980: (slave bridge938): making interface the new active one [ 1988.158593][ T5300] BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low! [ 1988.164237][ T5300] turning off the locking correctness validator. [ 1988.170540][ T5300] CPU: 0 PID: 5300 Comm: kworker/u4:5 Not tainted 6.5.0-rc5-syzkaller-01349-g6a1ed1430daa #0 [ 1988.180669][ T5300] Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/26/2023 [ 1988.190716][ T5300] Workqueue: bond1005 bond_netdev_notify_work [ 1988.196831][ T5300] Call Trace: [ 1988.200090][ T5300] [ 1988.203004][ T5300] dump_stack_lvl+0xd9/0x1b0 [ 1988.207588][ T5300] __lock_acquire+0x4286/0x5de0 [ 1988.212432][ T5300] ? lockdep_hardirqs_on_prepare+0x410/0x410 [ 1988.218397][ T5300] ? __lock_acquire+0x182f/0x5de0 [ 1988.223409][ T5300] lock_acquire+0x1ae/0x510 [ 1988.227897][ T5300] ? psi_task_switch+0x55a/0x900 [ 1988.232823][ T5300] ? lock_sync+0x190/0x190 [ 1988.237225][ T5300] ? lockdep_hardirqs_on_prepare+0x410/0x410 [ 1988.243189][ T5300] ? rcu_is_watching+0x12/0xb0 [ 1988.247939][ T5300] ? trace_pelt_se_tp+0xe9/0x120 [ 1988.252861][ T5300] ? __update_load_avg_se+0x3c7/0x8d0 [ 1988.258223][ T5300] psi_group_change+0x131/0xdc0 [ 1988.263057][ T5300] ? psi_task_switch+0x55a/0x900 [ 1988.267984][ T5300] psi_task_switch+0x55a/0x900 [ 1988.272736][ T5300] __schedule+0x2336/0x59f0 [ 1988.277234][ T5300] ? io_schedule_timeout+0x150/0x150 [ 1988.282512][ T5300] ? 
mark_held_locks+0x9f/0xe0 [ 1988.287261][ T5300] preempt_schedule_irq+0x52/0x90 [ 1988.292263][ T5300] irqentry_exit+0x35/0x80 [ 1988.296665][ T5300] asm_sysvec_apic_timer_interrupt+0x1a/0x20 [ 1988.302636][ T5300] RIP: 0010:queue_delayed_work_on+0x9a/0x130 [ 1988.308603][ T5300] Code: ff 48 89 ee e8 c7 26 31 00 48 85 ed 75 42 e8 2d 2b 31 00 9c 5b 81 e3 00 02 00 00 31 ff 48 89 de e8 ab 26 31 00 48 85 db 75 71 11 2b 31 00 44 89 e8 48 83 c4 08 5b 5d 41 5c 41 5d 41 5e 41 5f [ 1988.328193][ T5300] RSP: 0018:ffffc9001ae0fbe0 EFLAGS: 00000293 [ 1988.334249][ T5300] RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 [ 1988.342215][ T5300] RDX: ffff88807ba6bb80 RSI: ffffffff8154f5c6 RDI: 0000000000000007 [ 1988.350182][ T5300] RBP: 0000000000000200 R08: 0000000000000007 R09: 0000000000000000 [ 1988.358138][ T5300] R10: 0000000000000000 R11: 0000000000000000 R12: ffff88816410cc00 [ 1988.366090][ T5300] R13: 0000000000000001 R14: ffff888162424000 R15: 0000000000000001 [ 1988.374049][ T5300] ? queue_delayed_work_on+0xe6/0x130 [ 1988.379414][ T5300] bond_netdev_notify_work+0x26d/0x2c0 [ 1988.384863][ T5300] ? bond_xmit_activebackup_slave_get+0xd0/0xd0 [ 1988.391089][ T5300] ? reacquire_held_locks+0x4b0/0x4b0 [ 1988.396445][ T5300] ? do_raw_spin_lock+0x12e/0x2b0 [ 1988.401453][ T5300] ? spin_bug+0x1d0/0x1d0 [ 1988.405772][ T5300] process_one_work+0xaa2/0x16f0 [ 1988.410695][ T5300] ? bond_xmit_activebackup_slave_get+0xd0/0xd0 [ 1988.416923][ T5300] ? pwq_dec_nr_in_flight+0x2a0/0x2a0 [ 1988.422277][ T5300] ? spin_bug+0x1d0/0x1d0 [ 1988.426591][ T5300] worker_thread+0x687/0x1110 [ 1988.431256][ T5300] ? __kthread_parkme+0x152/0x220 [ 1988.436261][ T5300] ? process_one_work+0x16f0/0x16f0 [ 1988.441441][ T5300] kthread+0x33a/0x430 [ 1988.445488][ T5300] ? kthread_complete_and_exit+0x40/0x40 [ 1988.451104][ T5300] ret_from_fork+0x2c/0x70 [ 1988.455507][ T5300] ? 
kthread_complete_and_exit+0x40/0x40 [ 1988.461118][ T5300] ret_from_fork_asm+0x11/0x20 [ 1988.465885][ T5300] 01:55:38 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) r4 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r4, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r4, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) ioctl$sock_SIOCGIFVLAN_ADD_VLAN_CMD(r4, 0x8982, &(0x7f0000000100)={0x0, 'veth0_to_batadv\x00', {}, 0x33a}) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x20, 0x10, 0x200, 0x70bd29, 0x25dfdbfb}, 0x20}}, 0x0) 01:55:38 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xf002, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1988.491093][T28668] bridge938: entered promiscuous mode [ 1988.506595][T28668] bond980: (slave bridge938): Enslaving as an active interface with an up link [ 1988.516532][T28679] validate_nla: 15 callbacks suppressed [ 1988.516549][T28679] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1988.580116][T28679] bond928: entered promiscuous mode [ 1988.588692][T28679] 8021q: adding VLAN 0 to HW filter on device bond928 [ 1988.604501][T28680] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
01:55:38 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1ea, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1988.620015][T28680] workqueue: Failed to create a rescuer kthread for wq "bond365": -EINTR [ 1988.650315][T28681] bond928: (slave bridge895): making interface the new active one [ 1988.672598][T28681] bridge895: entered promiscuous mode [ 1988.686882][T28681] bond928: (slave bridge895): Enslaving as an active interface with an up link 01:55:38 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1988.739031][T28686] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 01:55:38 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xfffffdb4) [ 1988.801325][T28686] bond1033: entered promiscuous mode [ 1988.807575][T28686] 8021q: adding VLAN 0 to HW filter on device bond1033 01:55:38 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3f00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1988.856338][T28689] bond1033: (slave bridge997): making interface the new active one [ 1988.864361][T28689] bridge997: entered promiscuous mode [ 1988.874339][T28689] bond1033: (slave bridge997): Enslaving as an active interface with an up link [ 1988.884307][T28708] netlink: 'syz-executor.3': attribute type 1 has an invalid length. [ 1988.942032][T28708] bond981: entered promiscuous mode [ 1988.947498][T28708] 8021q: adding VLAN 0 to HW filter on device bond981 [ 1989.074859][T28710] bond981: (slave bridge939): making interface the new active one [ 1989.117420][T28710] bridge939: entered promiscuous mode [ 1989.147814][T28710] bond981: (slave bridge939): Enslaving as an active interface with an up link [ 1989.157628][T28714] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 01:55:38 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xf201, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1989.231425][T28714] bond532: entered promiscuous mode [ 1989.257982][T28714] 8021q: adding VLAN 0 to HW filter on device bond532 [ 1989.310948][T28716] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1989.349556][T28716] bond929: entered promiscuous mode 01:55:39 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xfffffdb4) (async) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xfffffdb4) 01:55:39 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) socket$inet6_tcp(0xa, 0x1, 0x0) (async) r4 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r4, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) sendto$inet6(r4, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r4, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) recvfrom$inet6(r4, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) ioctl$sock_SIOCGIFVLAN_ADD_VLAN_CMD(r4, 0x8982, &(0x7f0000000100)={0x0, 'veth0_to_batadv\x00', {}, 0x33a}) (async) ioctl$sock_SIOCGIFVLAN_ADD_VLAN_CMD(r4, 0x8982, &(0x7f0000000100)={0x0, 'veth0_to_batadv\x00', {}, 0x33a}) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x20, 0x10, 0x200, 0x70bd29, 0x25dfdbfb}, 0x20}}, 0x0) [ 1989.366940][T28716] 8021q: adding VLAN 0 to HW filter on device bond929 [ 1989.395730][T28720] netlink: 'syz-executor.4': attribute type 1 has an invalid length. 
01:55:39 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) [ 1989.426562][T28720] bond365 (uninitialized): Released all slaves 01:55:39 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) r4 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r4, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) recvfrom$inet6(r4, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) ioctl$sock_SIOCGIFVLAN_ADD_VLAN_CMD(r4, 0x8982, &(0x7f0000000100)={0x0, 'veth0_to_batadv\x00', {}, 0x33a}) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x20, 0x10, 0x200, 0x70bd29, 0x25dfdbfb}, 0x20}}, 0x0) [ 1989.533543][T28722] bond929: (slave bridge896): making interface the new active one [ 1989.578366][T28722] bridge896: entered promiscuous mode 01:55:39 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x400) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000020000c0e02000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 
0x4c}}, 0x0) [ 1989.619343][T28722] bond929: (slave bridge896): Enslaving as an active interface with an up link 01:55:39 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1f2, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1989.680313][T28731] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 1989.807600][T28731] bond1034: entered promiscuous mode [ 1989.820328][T28731] 8021q: adding VLAN 0 to HW filter on device bond1034 [ 1989.966129][T28734] bond1034: (slave bridge998): making interface the new active one [ 1989.989986][T28734] bridge998: entered promiscuous mode 01:55:39 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x4000, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1990.008736][T28734] bond1034: (slave bridge998): Enslaving as an active interface with an up link [ 1990.020644][T28738] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 1990.104732][T28738] bond982: entered promiscuous mode [ 1990.119694][T28738] 8021q: adding VLAN 0 to HW filter on device bond982 [ 1990.296980][T28740] bond982: (slave bridge940): making interface the new active one [ 1990.321046][T28740] bridge940: entered promiscuous mode [ 1990.350749][T28740] bond982: (slave bridge940): Enslaving as an active interface with an up link 01:55:40 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xf202, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:40 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) [ 1990.457978][T28763] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.1'. 01:55:40 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xfffffdb4) [ 1990.524281][T28763] bond533: entered promiscuous mode [ 1990.530530][T28763] 8021q: adding VLAN 0 to HW filter on device bond533 [ 1990.622746][T28765] bridge472: entered promiscuous mode [ 1990.643456][T28765] bond533: (slave bridge472): Enslaving as an active interface with an up link [ 1990.669656][T28770] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
01:55:40 executing program 1: socket$inet6_tcp(0xa, 0x1, 0x0) (async) r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) (async) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x400) r1 = socket$netlink(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000020000c0e02000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) 01:55:40 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1fa, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1990.723339][T28770] workqueue: Failed to create a rescuer kthread for wq "bond930": -EINTR [ 1990.921769][T28776] bond1035: entered promiscuous mode [ 1991.007031][T28776] 8021q: adding VLAN 0 to HW filter on device bond1035 [ 1991.067403][T28777] bond1035: (slave bridge999): making interface the new active one [ 1991.095240][T28777] bridge999: entered promiscuous mode [ 1991.110009][T28777] bond1035: (slave bridge999): Enslaving as an active interface with an up link 01:55:40 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x4800, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1991.189202][T28779] bond983: entered promiscuous mode [ 
1991.228837][T28779] 8021q: adding VLAN 0 to HW filter on device bond983 [ 1991.377086][T28782] bond983: (slave bridge941): making interface the new active one [ 1991.435977][T28782] bridge941: entered promiscuous mode [ 1991.522100][T28782] bond983: (slave bridge941): Enslaving as an active interface with an up link 01:55:41 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xf203, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:41 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) 01:55:41 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) socket$netlink(0x10, 0x3, 0x0) r1 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r1, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r1, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(0xffffffffffffffff, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r2, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r2}]}, 0x4c}}, 0x0) [ 1991.630252][T28792] netlink: 12 bytes leftover after parsing attributes in process `syz-executor.1'. 
01:55:41 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x400) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async, rerun: 64) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (rerun: 64) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000020000c0e02000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1991.670342][T28792] workqueue: Failed to create a rescuer kthread for wq "bond534": -EINTR 01:55:41 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) ioctl$BTRFS_IOC_INO_PATHS(r0, 0xc0389423, &(0x7f0000000180)={0xffffffffffffffff, 0x10, [0x309, 0x7fff, 0x8, 0xaee], &(0x7f0000000140)=[0x0, 0x0]}) listen(r3, 0x0) accept4(r3, 0x0, 0x0, 0x0) ioctl$sock_inet_SIOCSIFFLAGS(r3, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) setsockopt$inet6_tcp_int(r3, 0x6, 0x11, &(0x7f0000000100)=0x7fff, 0x4) r4 = socket(0x10, 0x803, 0x0) bind$inet6(r4, &(0x7f00000001c0)={0xa, 0x4e20, 0x7fff, @dev={0xfe, 0x80, '\x00', 0x20}, 0x8}, 0x1c) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) 01:55:41 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu\x00', 0x275a, 0x0) (async) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) (async) write$binfmt_script(r1, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) (async) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) (async) 01:55:41 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x1fe, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1991.830077][T28797] workqueue: Failed to create a rescuer kthread for wq "bond930": -EINTR [ 1992.078714][T28802] bond1036: entered promiscuous mode [ 1992.110448][T28802] 8021q: adding VLAN 0 to HW filter on device bond1036 01:55:41 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x4a00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1992.162448][T28804] bond1036: (slave bridge1000): making interface the new active one [ 1992.170672][T28804] bridge1000: entered promiscuous mode [ 1992.180335][T28804] bond1036: (slave bridge1000): Enslaving as an active interface with an up link 01:55:41 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000000)='cpuacct.usage_percpu\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) r1 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r1, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) write$binfmt_script(r1, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r1, 0x0) openat$cgroup_ro(r1, &(0x7f0000000040)='blkio.bfq.io_serviced\x00', 0x0, 0x0) [ 1992.240657][T28806] bond984: entered promiscuous mode [ 1992.270443][T28806] 8021q: adding VLAN 0 to HW filter on device bond984 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000000)=0x1baf, 0x12) write$binfmt_script(r0, &(0x7f0000000100)=ANY=[@ANYBLOB="233246ddecd78eb284300aee04b2cc31ddf4bd5b587e67880536e9f8d5c29e0dd1a7ff0ab8f304008c01fd3d9616b61a92782cce5aa2d68a51092cb899ff4c1613c59d268e462c18d3a57ab1ff7ea3f396b381c1c1a2c772bd83568407751c536212fd4bd72c7ec70f535b3aba7ef3e118c1e14f545d70e6682e964197cd7d573b51635ccfa6dfad8b7f82a771e3b306e218e497c84650"], 0xb) [ 1992.417815][T28808] bond984: (slave bridge942): making interface the new active one [ 1992.438797][T28808] bridge942: entered promiscuous mode [ 1992.484930][T28808] bond984: (slave bridge942): Enslaving as an active interface with an up link 01:55:42 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xf602, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, &(0x7f0000000000)=0x1baf, 0x12) write$binfmt_script(r0, &(0x7f0000000100)=ANY=[@ANYBLOB="233246ddecd78eb284300aee04b2cc31ddf4bd5b587e67880536e9f8d5c29e0dd1a7ff0ab8f304008c01fd3d9616b61a92782cce5aa2d68a51092cb899ff4c1613c59d268e462c18d3a57ab1ff7ea3f396b381c1c1a2c772bd83568407751c536212fd4bd72c7ec70f535b3aba7ef3e118c1e14f545d70e6682e964197cd7d573b51635ccfa6dfad8b7f82a771e3b306e218e497c84650"], 0xb) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) write$cgroup_int(r0, &(0x7f0000000000)=0x1baf, 0x12) (async) write$binfmt_script(r0, &(0x7f0000000100)=ANY=[@ANYBLOB="233246ddecd78eb284300aee04b2cc31ddf4bd5b587e67880536e9f8d5c29e0dd1a7ff0ab8f304008c01fd3d9616b61a92782cce5aa2d68a51092cb899ff4c1613c59d268e462c18d3a57ab1ff7ea3f396b381c1c1a2c772bd83568407751c536212fd4bd72c7ec70f535b3aba7ef3e118c1e14f545d70e6682e964197cd7d573b51635ccfa6dfad8b7f82a771e3b306e218e497c84650"], 0xb) (async) 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$cgroup_int(r0, 
&(0x7f0000000000)=0x1baf, 0x12) (async) write$cgroup_int(r0, &(0x7f0000000000)=0x1baf, 0x12) write$binfmt_script(r0, &(0x7f0000000100)=ANY=[@ANYBLOB="233246ddecd78eb284300aee04b2cc31ddf4bd5b587e67880536e9f8d5c29e0dd1a7ff0ab8f304008c01fd3d9616b61a92782cce5aa2d68a51092cb899ff4c1613c59d268e462c18d3a57ab1ff7ea3f396b381c1c1a2c772bd83568407751c536212fd4bd72c7ec70f535b3aba7ef3e118c1e14f545d70e6682e964197cd7d573b51635ccfa6dfad8b7f82a771e3b306e218e497c84650"], 0xb) 01:55:42 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="469513314e9f36050000005aad23698765440a0000000000"], 0xb) [ 1992.683411][T28833] bond930: entered promiscuous mode [ 1992.695910][T28833] 8021q: adding VLAN 0 to HW filter on device bond930 [ 1992.789162][T28835] bond930: (slave bridge897): making interface the new active one [ 1992.799328][T28835] bridge897: entered promiscuous mode 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="469513314e9f36050000005aad23698765440a0000000000"], 0xb) [ 1992.830449][T28835] bond930: (slave bridge897): Enslaving as an active interface with an up link 01:55:42 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) (async, rerun: 32) r1 = socket$netlink(0x10, 0x3, 0x0) (async, rerun: 32) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) ioctl$BTRFS_IOC_INO_PATHS(r0, 0xc0389423, &(0x7f0000000180)={0xffffffffffffffff, 0x10, [0x309, 0x7fff, 0x8, 0xaee], &(0x7f0000000140)=[0x0, 0x0]}) (async) listen(r3, 0x0) (async) accept4(r3, 0x0, 0x0, 0x0) (async) ioctl$sock_inet_SIOCSIFFLAGS(r3, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) setsockopt$inet6_tcp_int(r3, 0x6, 0x11, &(0x7f0000000100)=0x7fff, 0x4) (async) r4 = socket(0x10, 0x803, 0x0) bind$inet6(r4, &(0x7f00000001c0)={0xa, 0x4e20, 0x7fff, @dev={0xfe, 0x80, '\x00', 0x20}, 0x8}, 0x1c) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async, rerun: 32) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, 
[@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) (rerun: 32) 01:55:42 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x21a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="469513314e9f36050000005aad23698765440a0000000000"], 0xb) [ 1992.863292][T28836] workqueue: Failed to create a rescuer kthread for wq "bond534": -EINTR [ 1993.018506][T28847] bond1037: entered promiscuous mode [ 1993.041160][T28847] 8021q: adding VLAN 0 to HW filter on device bond1037 01:55:42 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x4c00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) [ 1993.084485][T28852] bond1037: (slave bridge1001): making interface the new active one [ 1993.098413][T28852] bridge1001: entered promiscuous mode [ 1993.114213][T28852] bond1037: (slave bridge1001): Enslaving as an active interface with an up link [ 1993.162828][T28859] bond985: entered promiscuous mode [ 1993.168944][T28859] 8021q: adding VLAN 0 to HW filter on device bond985 01:55:42 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) 01:55:42 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xfc00, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1993.227825][T28867] bond985: (slave bridge943): making interface the new active one [ 1993.236357][T28867] bridge943: entered promiscuous mode [ 1993.246543][T28867] bond985: (slave bridge943): Enslaving as an active interface with an up link 01:55:43 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000040)='cgroup.controllers\x00', 0x275a, 0x0) (async) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) (async) 01:55:43 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1993.375912][T28883] bond931: entered promiscuous mode [ 1993.382133][T28883] 8021q: adding VLAN 0 to HW filter on device bond931 01:55:43 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="f5458d1b23a9e50a2321202e2f66696c65300aff908efc265f98bb9b675d7c84b12cb7b4b46d4f8a81fad5723e282aa59c30f739bdc4adce36e871a37469f648c07b86af33bb9cf0e5f8"], 0xb) 01:55:43 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r3, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) ioctl$BTRFS_IOC_INO_PATHS(r0, 0xc0389423, &(0x7f0000000180)={0xffffffffffffffff, 0x10, [0x309, 0x7fff, 0x8, 0xaee], &(0x7f0000000140)=[0x0, 0x0]}) (async) listen(r3, 0x0) (async) accept4(r3, 0x0, 0x0, 0x0) (async) ioctl$sock_inet_SIOCSIFFLAGS(r3, 0x8914, &(0x7f0000000040)={'team_slave_0\x00', 0x4000}) setsockopt$inet6_tcp_int(r3, 0x6, 0x11, &(0x7f0000000100)=0x7fff, 0x4) (async) r4 = socket(0x10, 0x803, 0x0) bind$inet6(r4, &(0x7f00000001c0)={0xa, 
0x4e20, 0x7fff, @dev={0xfe, 0x80, '\x00', 0x20}, 0x8}, 0x1c) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x4c}}, 0x0) [ 1993.416475][T28892] bond931: (slave bridge898): making interface the new active one [ 1993.424595][T28892] bridge898: entered promiscuous mode [ 1993.435222][T28892] bond931: (slave bridge898): Enslaving as an active interface with an up link 01:55:43 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x27a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1993.474342][T28897] bond1038: entered promiscuous mode [ 1993.481851][T28897] 8021q: adding VLAN 0 to HW filter on device bond1038 01:55:43 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="f5458d1b23a9e50a2321202e2f66696c65300aff908efc265f98bb9b675d7c84b12cb7b4b46d4f8a81fad5723e282aa59c30f739bdc4adce36e871a37469f648c07b86af33bb9cf0e5f8"], 0xb) 01:55:43 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22, 0x0, @local}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x140000) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 
0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000140)=@newlink={0x7c, 0x10, 0xffffff1f, 0x0, 0x25dfdbfd, {}, [@IFLA_LINKINFO={0x54, 0x12, 0x0, 0x1, @erspan={{0xb}, {0x44, 0x2, 0x0, 0x1, [@IFLA_GRE_ENCAP_SPORT={0x6, 0x10, 0x4e20}, @IFLA_GRE_OKEY={0x8, 0x5, 0x200}, @IFLA_GRE_TOS={0x5, 0x9, 0x9}, @IFLA_GRE_ENCAP_TYPE={0x6, 0xe, 0x3}, @IFLA_GRE_IKEY={0x8, 0x4, 0xa9}, @IFLA_GRE_LINK={0x8}, @IFLA_GRE_IKEY={0x8, 0x4, 0x80000000}, @IFLA_GRE_IKEY={0x8, 0x4, 0x3}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x7c}}, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) openat$cgroup_ro(r6, &(0x7f0000000100)='pids.current\x00', 0x0, 0x0) 01:55:43 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="f5458d1b23a9e50a2321202e2f66696c65300aff908efc265f98bb9b675d7c84b12cb7b4b46d4f8a81fad5723e282aa59c30f739bdc4adce36e871a37469f648c07b86af33bb9cf0e5f8"], 0xb) [ 1993.669731][T28901] bond1038: (slave bridge1002): making interface the new active one [ 1993.693373][T28901] bridge1002: entered promiscuous mode [ 1993.714705][T28901] bond1038: (slave bridge1002): Enslaving as an active interface with an up link [ 1993.739095][T28904] validate_nla: 11 callbacks suppressed [ 1993.739114][T28904] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:43 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x5203, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:43 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="000000000021761367c44d3d9294f82a127cd149b767599974aa796375a8136fc2992e4ff7c38d17969b35e15b8b18945f6b2c3732fcc69fc7fda6e80c52f43c8ebc018e46d942bea34a2600000093c339b57f3998ccc39f9f2976cf9b29b9338533f1c5dbd113ad12fd991715"], 0xb) r1 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r1, &(0x7f000000c140)={0x0, 0x0, 
&(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) write$binfmt_script(r1, &(0x7f0000000000)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {}, {0x20, 'memory.events\x00'}], 0xa, "ec021ae7f318236cb6c29fc816736aba3b7149"}, 0x3d) 01:55:43 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="000000000021761367c44d3d9294f82a127cd149b767599974aa796375a8136fc2992e4ff7c38d17969b35e15b8b18945f6b2c3732fcc69fc7fda6e80c52f43c8ebc018e46d942bea34a2600000093c339b57f3998ccc39f9f2976cf9b29b9338533f1c5dbd113ad12fd991715"], 0xb) (async) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="000000000021761367c44d3d9294f82a127cd149b767599974aa796375a8136fc2992e4ff7c38d17969b35e15b8b18945f6b2c3732fcc69fc7fda6e80c52f43c8ebc018e46d942bea34a2600000093c339b57f3998ccc39f9f2976cf9b29b9338533f1c5dbd113ad12fd991715"], 0xb) r1 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r1, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) write$binfmt_script(r1, &(0x7f0000000000)={'#! 
', './file0', [{0x20, 'memory.events\x00'}, {}, {0x20, 'memory.events\x00'}], 0xa, "ec021ae7f318236cb6c29fc816736aba3b7149"}, 0x3d) [ 1993.810030][T28904] bond986: entered promiscuous mode [ 1993.824683][T28904] 8021q: adding VLAN 0 to HW filter on device bond986 01:55:43 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xfe01, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:43 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="000000000021761367c44d3d9294f82a127cd149b767599974aa796375a8136fc2992e4ff7c38d17969b35e15b8b18945f6b2c3732fcc69fc7fda6e80c52f43c8ebc018e46d942bea34a2600000093c339b57f3998ccc39f9f2976cf9b29b9338533f1c5dbd113ad12fd991715"], 0xb) (async) r1 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r1, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) write$binfmt_script(r1, &(0x7f0000000000)={'#! ', './file0', [{0x20, 'memory.events\x00'}, {}, {0x20, 'memory.events\x00'}], 0xa, "ec021ae7f318236cb6c29fc816736aba3b7149"}, 0x3d) [ 1993.899357][T28910] bond986: (slave bridge944): making interface the new active one [ 1993.910064][T28910] bridge944: entered promiscuous mode [ 1993.919939][T28910] bond986: (slave bridge944): Enslaving as an active interface with an up link [ 1993.989426][T28928] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
01:55:43 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, 0x0, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:43 executing program 5: r0 = accept(0xffffffffffffffff, &(0x7f0000000000)=@alg, &(0x7f0000000100)=0x80) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) r2 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r2, 0x84, 0x6f, &(0x7f0000000080)={0x0, 0x1c, &(0x7f0000000000)=[@in6={0xa, 0x0, 0x0, @private1}]}, &(0x7f00000000c0)=0x10) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000180)={0x1, [0x0]}, &(0x7f00000001c0)=0x8) setsockopt$inet_sctp6_SCTP_PEER_ADDR_THLDS(r1, 0x84, 0x1f, &(0x7f0000000100)={r3, @in6={{0xa, 0x0, 0x0, @empty}}}, 0x90) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r0, 0x84, 0x9, &(0x7f0000000140)={r3, @in={{0x2, 0x4e21, @dev={0xac, 0x14, 0x14, 0x2d}}}, 0xffff6a46, 0x0, 0x1, 0x81, 0x3d, 0x1f8, 0x4}, &(0x7f0000000200)=0x9c) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r4, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) [ 1994.089357][T28928] bond932: entered promiscuous mode [ 1994.107068][T28928] 8021q: adding VLAN 0 to HW filter on device bond932 [ 1994.178638][T28932] bond932: (slave bridge899): making interface the new active one [ 1994.186922][T28932] bridge899: entered promiscuous mode [ 1994.200384][T28932] bond932: (slave bridge899): Enslaving as an active interface with an up link [ 1994.210302][T28939] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 01:55:43 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x2b2, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1994.320766][T28939] bond534: entered promiscuous mode [ 1994.336569][T28939] 8021q: adding VLAN 0 to HW filter on device bond534 [ 1994.377716][T28951] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 
01:55:44 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22, 0x0, @local}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x140000) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) r4 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000140)=@newlink={0x7c, 0x10, 0xffffff1f, 0x0, 0x25dfdbfd, {}, [@IFLA_LINKINFO={0x54, 0x12, 0x0, 0x1, @erspan={{0xb}, {0x44, 0x2, 0x0, 0x1, [@IFLA_GRE_ENCAP_SPORT={0x6, 0x10, 0x4e20}, @IFLA_GRE_OKEY={0x8, 0x5, 0x200}, @IFLA_GRE_TOS={0x5, 0x9, 0x9}, @IFLA_GRE_ENCAP_TYPE={0x6, 0xe, 0x3}, @IFLA_GRE_IKEY={0x8, 0x4, 0xa9}, @IFLA_GRE_LINK={0x8}, @IFLA_GRE_IKEY={0x8, 0x4, 0x80000000}, @IFLA_GRE_IKEY={0x8, 0x4, 0x3}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x7c}}, 0x0) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) write$binfmt_script(r6, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) openat$cgroup_ro(r6, &(0x7f0000000100)='pids.current\x00', 0x0, 0x0) socket$inet6_tcp(0xa, 0x1, 0x0) (async) socket$inet6_tcp(0xa, 0x1, 0x0) (async) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22, 0x0, @local}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x140000) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000140)=@newlink={0x7c, 0x10, 0xffffff1f, 0x0, 0x25dfdbfd, {}, [@IFLA_LINKINFO={0x54, 0x12, 0x0, 0x1, @erspan={{0xb}, {0x44, 0x2, 0x0, 0x1, [@IFLA_GRE_ENCAP_SPORT={0x6, 0x10, 0x4e20}, @IFLA_GRE_OKEY={0x8, 0x5, 0x200}, @IFLA_GRE_TOS={0x5, 0x9, 0x9}, @IFLA_GRE_ENCAP_TYPE={0x6, 0xe, 0x3}, @IFLA_GRE_IKEY={0x8, 0x4, 0xa9}, @IFLA_GRE_LINK={0x8}, @IFLA_GRE_IKEY={0x8, 0x4, 0x80000000}, @IFLA_GRE_IKEY={0x8, 0x4, 0x3}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x7c}}, 0x0) (async) 
openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) (async) write$binfmt_script(r6, &(0x7f00000003c0)=ANY=[], 0x208e24b) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) (async) openat$cgroup_ro(r6, &(0x7f0000000100)='pids.current\x00', 0x0, 0x0) (async) [ 1994.464965][T28951] bond1039: entered promiscuous mode [ 1994.483192][T28951] 8021q: adding VLAN 0 to HW filter on device bond1039 [ 1994.590713][T28956] bond1039: (slave bridge1003): making interface the new active one [ 1994.621113][T28956] bridge1003: entered promiscuous mode [ 1994.651981][T28956] bond1039: (slave bridge1003): Enslaving as an active interface with an up link [ 1994.663326][T28959] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:44 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x5401, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1994.710046][T28959] bond987: entered promiscuous mode [ 1994.715935][T28959] 8021q: adding VLAN 0 to HW filter on device bond987 01:55:44 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xfeff, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1994.805343][T28963] bond987: (slave bridge945): making interface the new active one [ 1994.814038][T28963] bridge945: entered promiscuous mode [ 1994.824350][T28963] bond987: (slave bridge945): Enslaving as an active interface with an up link 01:55:44 executing program 5: r0 = accept(0xffffffffffffffff, &(0x7f0000000000)=@alg, &(0x7f0000000100)=0x80) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) r2 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r2, 0x84, 0x6f, &(0x7f0000000080)={0x0, 0x1c, &(0x7f0000000000)=[@in6={0xa, 0x0, 0x0, @private1}]}, &(0x7f00000000c0)=0x10) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000180)={0x1, [0x0]}, &(0x7f00000001c0)=0x8) setsockopt$inet_sctp6_SCTP_PEER_ADDR_THLDS(r1, 
0x84, 0x1f, &(0x7f0000000100)={r3, @in6={{0xa, 0x0, 0x0, @empty}}}, 0x90) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r0, 0x84, 0x9, &(0x7f0000000140)={r3, @in={{0x2, 0x4e21, @dev={0xac, 0x14, 0x14, 0x2d}}}, 0xffff6a46, 0x0, 0x1, 0x81, 0x3d, 0x1f8, 0x4}, &(0x7f0000000200)=0x9c) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r4, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) accept(0xffffffffffffffff, &(0x7f0000000000)=@alg, &(0x7f0000000100)=0x80) (async) socket$inet6_sctp(0xa, 0x5, 0x84) (async) socket$inet6_sctp(0xa, 0x5, 0x84) (async) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r2, 0x84, 0x6f, &(0x7f0000000080)={0x0, 0x1c, &(0x7f0000000000)=[@in6={0xa, 0x0, 0x0, @private1}]}, &(0x7f00000000c0)=0x10) (async) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000180)={0x1, [0x0]}, &(0x7f00000001c0)=0x8) (async) setsockopt$inet_sctp6_SCTP_PEER_ADDR_THLDS(r1, 0x84, 0x1f, &(0x7f0000000100)={r3, @in6={{0xa, 0x0, 0x0, @empty}}}, 0x90) (async) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r0, 0x84, 0x9, &(0x7f0000000140)={r3, @in={{0x2, 0x4e21, @dev={0xac, 0x14, 0x14, 0x2d}}}, 0xffff6a46, 0x0, 0x1, 0x81, 0x3d, 0x1f8, 0x4}, &(0x7f0000000200)=0x9c) (async) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) write$binfmt_script(r4, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) (async) [ 1994.937029][T28974] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 01:55:44 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 0x0}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1994.979406][T28974] bond933: entered promiscuous mode [ 1994.987644][T28974] 8021q: adding VLAN 0 to HW filter on device bond933 01:55:44 executing program 5: r0 = accept(0xffffffffffffffff, &(0x7f0000000000)=@alg, &(0x7f0000000100)=0x80) r1 = socket$inet6_sctp(0xa, 0x5, 0x84) r2 = socket$inet6_sctp(0xa, 0x5, 0x84) getsockopt$inet_sctp6_SCTP_SOCKOPT_CONNECTX3(r2, 0x84, 0x6f, &(0x7f0000000080)={0x0, 0x1c, &(0x7f0000000000)=[@in6={0xa, 0x0, 0x0, @private1}]}, &(0x7f00000000c0)=0x10) (async) getsockopt$inet_sctp6_SCTP_GET_ASSOC_ID_LIST(r2, 0x84, 0x1d, &(0x7f0000000180)={0x1, [0x0]}, &(0x7f00000001c0)=0x8) setsockopt$inet_sctp6_SCTP_PEER_ADDR_THLDS(r1, 0x84, 0x1f, &(0x7f0000000100)={r3, @in6={{0xa, 0x0, 0x0, @empty}}}, 0x90) getsockopt$inet_sctp6_SCTP_PEER_ADDR_PARAMS(r0, 0x84, 0x9, &(0x7f0000000140)={r3, @in={{0x2, 0x4e21, @dev={0xac, 0x14, 0x14, 0x2d}}}, 0xffff6a46, 0x0, 0x1, 0x81, 0x3d, 0x1f8, 0x4}, &(0x7f0000000200)=0x9c) (async) r4 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r4, &(0x7f00000000c0)={'#! 
', './file0'}, 0xb) [ 1995.152436][T28975] bond933: (slave bridge900): making interface the new active one [ 1995.183994][T28975] bridge900: entered promiscuous mode 01:55:44 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x2b8, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1995.197614][T28975] bond933: (slave bridge900): Enslaving as an active interface with an up link 01:55:44 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) r1 = socket$inet6_tcp(0xa, 0x1, 0x0) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) (async) sendto$inet6(r1, 0x0, 0x0, 0x20000004, &(0x7f0000000080)={0xa, 0x4e22}, 0x1c) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) (async) recvfrom$inet6(r1, &(0x7f0000000000)=""/26, 0x1a, 0x40004050, &(0x7f0000001880)={0xa, 0x0, 0x0, @rand_addr=' \x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02'}, 0x1c) bind$inet6(r1, &(0x7f00000003c0)={0xa, 0x4e22, 0x0, @local}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x140000) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket$netlink(0x10, 0x3, 0x0) socket(0x10, 0x803, 0x0) (async) r4 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r4, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r4, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r3, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r5, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r2, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000140)=@newlink={0x7c, 0x10, 0xffffff1f, 0x0, 0x25dfdbfd, {}, [@IFLA_LINKINFO={0x54, 0x12, 0x0, 0x1, @erspan={{0xb}, {0x44, 0x2, 0x0, 0x1, [@IFLA_GRE_ENCAP_SPORT={0x6, 0x10, 0x4e20}, @IFLA_GRE_OKEY={0x8, 0x5, 0x200}, @IFLA_GRE_TOS={0x5, 0x9, 0x9}, @IFLA_GRE_ENCAP_TYPE={0x6, 0xe, 0x3}, @IFLA_GRE_IKEY={0x8, 0x4, 0xa9}, @IFLA_GRE_LINK={0x8}, @IFLA_GRE_IKEY={0x8, 0x4, 0x80000000}, @IFLA_GRE_IKEY={0x8, 0x4, 0x3}]}}}, @IFLA_MASTER={0x8, 0xa, r5}]}, 0x7c}}, 0x0) openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) (async) r6 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000400)='cgroup.controllers\x00', 0x275a, 0x0) 
write$binfmt_script(r6, &(0x7f00000003c0)=ANY=[], 0x208e24b) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) (async) mmap(&(0x7f0000000000/0xb36000)=nil, 0xb36000, 0x2, 0x28011, r6, 0x0) openat$cgroup_ro(r6, &(0x7f0000000100)='pids.current\x00', 0x0, 0x0) [ 1995.239756][T28979] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 1995.262501][T28979] workqueue: Failed to create a rescuer kthread for wq "bond535": -EINTR [ 1995.263545][T28986] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 1995.367183][T28986] bond1040: entered promiscuous mode [ 1995.374549][T28986] 8021q: adding VLAN 0 to HW filter on device bond1040 01:55:45 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x5612, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1995.481670][T28987] bond1040: (slave bridge1004): making interface the new active one [ 1995.490908][T28987] bridge1004: entered promiscuous mode [ 1995.504252][T28987] bond1040: (slave bridge1004): Enslaving as an active interface with an up link [ 1995.514312][T28990] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 1995.617476][T28990] bond988: entered promiscuous mode [ 1995.643046][T28990] 8021q: adding VLAN 0 to HW filter on device bond988 [ 1995.769767][T28995] bond988: (slave bridge946): making interface the new active one [ 1995.778083][T28995] bridge946: entered promiscuous mode [ 1995.803706][T28995] bond988: (slave bridge946): Enslaving as an active interface with an up link 01:55:45 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xff7f, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:45 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 0x0}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1995.923444][T29012] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 1995.958414][T29012] bond934: entered promiscuous mode [ 1995.964954][T29012] 8021q: adding VLAN 0 to HW filter on device bond934 01:55:45 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f66696c65140a"], 0xb) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), r0) sendmsg$NL80211_CMD_START_AP(r0, &(0x7f00000001c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000180)={&(0x7f0000000100)={0x6c, r1, 0x10, 0x70bd25, 0x25dfdbfc, {{}, {@void, @val={0xc, 0x99, {0x1952, 0x42}}}}, [@crypto_settings=[@NL80211_ATTR_WPA_VERSIONS={0x8, 0x4b, 0x3}, @NL80211_ATTR_AKM_SUITES={0x14, 0x4c, [0xfac03, 0xfac0c, 0xfac05, 0xfac09]}, @NL80211_ATTR_WPA_VERSIONS={0x8, 0x4b, 0x2}], @NL80211_ATTR_HE_OBSS_PD={0x28, 0x117, 0x0, 0x1, [@NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP={0xc, 0x4, "4a6bef26963078e8"}, @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET={0x5, 0x1, 0xa}, @NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP={0xc, 0x4, "284afc7c4879ab73"}]}]}, 0x6c}, 0x1, 0x0, 0x0, 0xc0c1}, 0x10) 01:55:45 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f66696c65140a"], 0xb) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), r0) sendmsg$NL80211_CMD_START_AP(r0, &(0x7f00000001c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000180)={&(0x7f0000000100)={0x6c, r1, 0x10, 0x70bd25, 0x25dfdbfc, {{}, {@void, @val={0xc, 0x99, {0x1952, 0x42}}}}, [@crypto_settings=[@NL80211_ATTR_WPA_VERSIONS={0x8, 0x4b, 0x3}, @NL80211_ATTR_AKM_SUITES={0x14, 0x4c, [0xfac03, 0xfac0c, 0xfac05, 0xfac09]}, @NL80211_ATTR_WPA_VERSIONS={0x8, 0x4b, 0x2}], @NL80211_ATTR_HE_OBSS_PD={0x28, 0x117, 0x0, 0x1, [@NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP={0xc, 0x4, "4a6bef26963078e8"}, @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET={0x5, 0x1, 0xa}, @NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP={0xc, 0x4, "284afc7c4879ab73"}]}]}, 0x6c}, 0x1, 0x0, 0x0, 0xc0c1}, 0x10) 01:55:45 executing program 5: openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f66696c65140a"], 0xb) r1 = syz_genetlink_get_family_id$nl80211(&(0x7f0000000040), r0) sendmsg$NL80211_CMD_START_AP(r0, &(0x7f00000001c0)={&(0x7f0000000000)={0x10, 0x0, 0x0, 0x800000}, 0xc, &(0x7f0000000180)={&(0x7f0000000100)={0x6c, r1, 0x10, 0x70bd25, 0x25dfdbfc, {{}, {@void, @val={0xc, 0x99, {0x1952, 0x42}}}}, [@crypto_settings=[@NL80211_ATTR_WPA_VERSIONS={0x8, 0x4b, 0x3}, @NL80211_ATTR_AKM_SUITES={0x14, 0x4c, [0xfac03, 0xfac0c, 0xfac05, 0xfac09]}, @NL80211_ATTR_WPA_VERSIONS={0x8, 0x4b, 0x2}], @NL80211_ATTR_HE_OBSS_PD={0x28, 0x117, 0x0, 0x1, [@NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP={0xc, 0x4, "4a6bef26963078e8"}, @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET={0x5, 0x1, 0xa}, @NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP={0xc, 0x4, "284afc7c4879ab73"}]}]}, 0x6c}, 0x1, 0x0, 0x0, 0xc0c1}, 0x10) 01:55:45 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='pids.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f6669ff65300a"], 0xb) 01:55:45 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x2f2, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1996.148559][T29015] bond934: (slave bridge901): making interface the new active one [ 1996.165219][T29015] bridge901: entered promiscuous mode [ 1996.177523][T29015] bond934: (slave bridge901): Enslaving as an active interface with an up link 01:55:45 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='pids.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f6669ff65300a"], 0xb) 01:55:45 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1996.206369][T29020] workqueue: Failed to create a rescuer kthread for wq "bond535": -EINTR 01:55:45 executing program 5: openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='pids.events\x00', 0x275a, 0x0) (async) r0 = openat$cgroup_ro(0xffffffffffffffff, &(0x7f0000000000)='pids.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)=ANY=[@ANYBLOB="2321202e2f6669ff65300a"], 0xb) [ 1996.220682][T29021] workqueue: Failed to create a rescuer kthread for wq "bond535": -EINTR [ 1996.363903][T29026] bond1041: entered promiscuous mode 01:55:46 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="2321202e2f66696c65300a4af1d707b0811ec8b3901c7c111a541033bdc6b70be1e4ca034e6328c1bb4d6af0dcdd50a0d95df70fe7d76480472454aef90fe615745632d3bf60b9779fad2924f357326e8e66aa"], 0xb) r1 = openat$ppp(0xffffffffffffff9c, &(0x7f0000001740), 0x0, 0x0) ioctl$PPPIOCNEWUNIT(r1, 0xc004743e, &(0x7f0000000280)=0xffffffff) ioctl$PPPIOCSACTIVE(r1, 0x40047459, &(0x7f0000000080)={0xfffffffffffffe43, 0x0}) writev(r1, &(0x7f0000000040)=[{&(0x7f0000000000)="b2ff", 0x2}], 0x1) readv(r1, 0x0, 0x0) writev(r1, 
&(0x7f0000000500)=[{&(0x7f00000000c0)="1d6be0bd4a2e7dfc99a21c7097b66766cf3a27351d96d117442be494a1b4cee27564756764695c938534385ac96f732771b039bd40b0eb408177c98c9c28e7776894fca4f92555c121014064f5149cbe9b5a845abcb4e5e4ee6b6f122d0bb8324a437c38c8d971854c9ddcfbdcb66c17a1fcb4f6ffe0ef50bc4e54aab677d401ff3eff9ce16ea55676fe2fce8f0cb177a5b5f9416586ffcb47d1c65fe73d3150ed377c6895ac7c39bd936481fab26ccf87b286c6ea933988f0c6fa63931a21302f9beb4b07757018841cc54b06a4b2797902996954a44c00821151b741438ca3", 0xe0}], 0x1) readv(r1, &(0x7f0000000580)=[{&(0x7f00000001c0)=""/139, 0x8b}, {&(0x7f00000002c0)=""/192, 0xc0}, {&(0x7f0000000380)=""/71, 0x47}, {&(0x7f0000000400)=""/225, 0xe1}, {&(0x7f0000000540)=""/52, 0x34}], 0x5) mmap(&(0x7f0000ff9000/0x4000)=nil, 0x4000, 0x1, 0x2010, r1, 0x766aa000) sendfile(r1, r0, &(0x7f00000000c0)=0x8, 0x5) [ 1996.436201][T29026] 8021q: adding VLAN 0 to HW filter on device bond1041 [ 1996.570573][T29028] bond1041: (slave bridge1005): making interface the new active one [ 1996.579403][T29028] bridge1005: entered promiscuous mode [ 1996.615014][T29028] bond1041: (slave bridge1005): Enslaving as an active interface with an up link 01:55:46 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x5712, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1996.668310][T29030] bond989: entered promiscuous mode [ 1996.673927][T29030] 8021q: adding VLAN 0 to HW filter on device bond989 01:55:46 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0xfffe, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1996.737561][T29033] bond989: (slave bridge947): making interface the new active one [ 1996.754154][T29033] bridge947: entered promiscuous mode [ 1996.772784][T29033] bond989: (slave bridge947): Enslaving as an active interface with an up link 01:55:46 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, 0x0}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1996.921779][T29052] bond935: entered promiscuous mode [ 1996.932886][T29052] 8021q: adding VLAN 0 to HW filter on device bond935 01:55:46 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x300, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1997.008759][T29057] bond935: (slave bridge902): making interface the new active one [ 1997.021775][T29057] bridge902: entered promiscuous mode [ 1997.030950][T29057] bond935: (slave bridge902): Enslaving as an active interface with an up link [ 1997.104666][T29063] bond535: entered promiscuous mode [ 1997.110201][T29063] 8021q: adding VLAN 0 to HW filter on device bond535 01:55:46 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1997.278276][T29076] bond1042: entered promiscuous mode [ 1997.306326][T29076] 8021q: adding VLAN 0 to HW filter on device bond1042 [ 1997.439066][T29077] bond1042: (slave bridge1006): making interface the new active one [ 1997.467002][T29077] bridge1006: entered promiscuous mode 01:55:47 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) 
sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x5865, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1997.518497][T29077] bond1042: (slave bridge1006): Enslaving as an active interface with an up link [ 1997.592870][T29080] bond990: entered promiscuous mode [ 1997.604586][T29080] 8021q: adding VLAN 0 to HW filter on device bond990 01:55:47 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x34000, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1997.685138][T29082] bond990: (slave bridge948): making interface the new active one [ 1997.695902][T29082] bridge948: entered promiscuous mode [ 1997.713075][T29082] bond990: (slave bridge948): Enslaving as an active interface with an up link [ 1997.849089][T29088] bond936: entered promiscuous mode 01:55:47 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1997.873076][T29088] 8021q: adding VLAN 0 to HW filter on device bond936 01:55:47 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="2321202e2f66696c65300a4af1d707b0811ec8b3901c7c111a541033bdc6b70be1e4ca034e6328c1bb4d6af0dcdd50a0d95df70fe7d76480472454aef90fe615745632d3bf60b9779fad2924f357326e8e66aa"], 0xb) (async) r1 = openat$ppp(0xffffffffffffff9c, &(0x7f0000001740), 0x0, 0x0) ioctl$PPPIOCNEWUNIT(r1, 
0xc004743e, &(0x7f0000000280)=0xffffffff) (async) ioctl$PPPIOCSACTIVE(r1, 0x40047459, &(0x7f0000000080)={0xfffffffffffffe43, 0x0}) (async) writev(r1, &(0x7f0000000040)=[{&(0x7f0000000000)="b2ff", 0x2}], 0x1) (async) readv(r1, 0x0, 0x0) (async) writev(r1, &(0x7f0000000500)=[{&(0x7f00000000c0)="1d6be0bd4a2e7dfc99a21c7097b66766cf3a27351d96d117442be494a1b4cee27564756764695c938534385ac96f732771b039bd40b0eb408177c98c9c28e7776894fca4f92555c121014064f5149cbe9b5a845abcb4e5e4ee6b6f122d0bb8324a437c38c8d971854c9ddcfbdcb66c17a1fcb4f6ffe0ef50bc4e54aab677d401ff3eff9ce16ea55676fe2fce8f0cb177a5b5f9416586ffcb47d1c65fe73d3150ed377c6895ac7c39bd936481fab26ccf87b286c6ea933988f0c6fa63931a21302f9beb4b07757018841cc54b06a4b2797902996954a44c00821151b741438ca3", 0xe0}], 0x1) (async) readv(r1, &(0x7f0000000580)=[{&(0x7f00000001c0)=""/139, 0x8b}, {&(0x7f00000002c0)=""/192, 0xc0}, {&(0x7f0000000380)=""/71, 0x47}, {&(0x7f0000000400)=""/225, 0xe1}, {&(0x7f0000000540)=""/52, 0x34}], 0x5) (async) mmap(&(0x7f0000ff9000/0x4000)=nil, 0x4000, 0x1, 0x2010, r1, 0x766aa000) (async) sendfile(r1, r0, &(0x7f00000000c0)=0x8, 0x5) [ 1998.080551][T29089] bond936: (slave bridge903): making interface the new active one [ 1998.108420][T29089] bridge903: entered promiscuous mode 01:55:47 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x30a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:47 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async, rerun: 32) listen(r0, 0x0) (async, rerun: 32) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0}}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x0, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r4}]}, 0x4c}}, 0x0) [ 1998.133998][T29089] bond936: (slave bridge903): Enslaving as an active interface with an up link [ 1998.150119][T29093] workqueue: Failed to create a rescuer kthread for wq "bond536": -EINTR [ 1998.250930][T29100] bond1043: entered promiscuous mode [ 
1998.287765][T29100] 8021q: adding VLAN 0 to HW filter on device bond1043 01:55:48 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x5c03, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1998.349940][T29101] bond1043: (slave bridge1007): making interface the new active one [ 1998.358835][T29101] bridge1007: entered promiscuous mode [ 1998.369246][T29101] bond1043: (slave bridge1007): Enslaving as an active interface with an up link [ 1998.421739][T29103] bond991: entered promiscuous mode [ 1998.433947][T29103] 8021q: adding VLAN 0 to HW filter on device bond991 [ 1998.480463][T29106] bond991: (slave bridge949): making interface the new active one [ 1998.488863][T29106] bridge949: entered promiscuous mode [ 1998.499232][T29106] bond991: (slave bridge949): Enslaving as an active interface with an up link 01:55:48 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x3ffff, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:48 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1998.668868][T29122] bond536: entered promiscuous mode [ 1998.675664][T29122] 8021q: adding VLAN 0 to HW filter on device bond536 [ 1998.758442][T29128] bond937: entered promiscuous mode [ 1998.767847][T29128] 8021q: adding VLAN 0 
to HW filter on device bond937 [ 1998.843541][T29130] bond937: (slave bridge904): making interface the new active one [ 1998.856503][T29130] bridge904: entered promiscuous mode [ 1998.875588][T29130] bond937: (slave bridge904): Enslaving as an active interface with an up link [ 1998.887334][T29133] validate_nla: 14 callbacks suppressed 01:55:48 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x31a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1998.887350][T29133] netlink: 'syz-executor.0': attribute type 1 has an invalid length. [ 1998.962436][T29133] bond1044: entered promiscuous mode [ 1998.970257][T29133] 8021q: adding VLAN 0 to HW filter on device bond1044 01:55:48 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000280)=ANY=[@ANYBLOB="4c0000001000090800"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00a0fe6dbb59fafbd480dd116a69dc348e7b581cda5e7332193a4cde406786e1ae1896fbb78dcf93c76c4b334671a5725b81d5f0091793ff3617ce6978a1d9195c3316475f2cebd9077b7dba3ba369504dcff425ce47598cad9c9369aca2c5f0114a8a87324f8bc5979fc9ca7361ea739c6273434013bd7e1c", @ANYRES32=r4, @ANYBLOB], 0x4c}}, 0x0) r5 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r5, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)=@getnetconf={0x14, 0x52, 0x801, 0x70bd27, 0x25dfdbfd, {}, ["", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4000001) [ 1999.037329][T29134] bond1044: (slave bridge1008): making interface the new active one [ 1999.047417][T29134] bridge1008: entered promiscuous mode [ 1999.058587][T29134] bond1044: (slave bridge1008): Enslaving as an active interface with an up link 01:55:48 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, 
&(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x6000, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:48 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0000000000)=ANY=[@ANYBLOB="2321202e2f66696c65300a4af1d707b0811ec8b3901c7c111a541033bdc6b70be1e4ca034e6328c1bb4d6af0dcdd50a0d95df70fe7d76480472454aef90fe615745632d3bf60b9779fad2924f357326e8e66aa"], 0xb) r1 = openat$ppp(0xffffffffffffff9c, &(0x7f0000001740), 0x0, 0x0) ioctl$PPPIOCNEWUNIT(r1, 0xc004743e, &(0x7f0000000280)=0xffffffff) ioctl$PPPIOCSACTIVE(r1, 0x40047459, &(0x7f0000000080)={0xfffffffffffffe43, 0x0}) writev(r1, &(0x7f0000000040)=[{&(0x7f0000000000)="b2ff", 0x2}], 0x1) readv(r1, 0x0, 0x0) writev(r1, &(0x7f0000000500)=[{&(0x7f00000000c0)="1d6be0bd4a2e7dfc99a21c7097b66766cf3a27351d96d117442be494a1b4cee27564756764695c938534385ac96f732771b039bd40b0eb408177c98c9c28e7776894fca4f92555c121014064f5149cbe9b5a845abcb4e5e4ee6b6f122d0bb8324a437c38c8d971854c9ddcfbdcb66c17a1fcb4f6ffe0ef50bc4e54aab677d401ff3eff9ce16ea55676fe2fce8f0cb177a5b5f9416586ffcb47d1c65fe73d3150ed377c6895ac7c39bd936481fab26ccf87b286c6ea933988f0c6fa63931a21302f9beb4b07757018841cc54b06a4b2797902996954a44c00821151b741438ca3", 0xe0}], 0x1) (async) writev(r1, &(0x7f0000000500)=[{&(0x7f00000000c0)="1d6be0bd4a2e7dfc99a21c7097b66766cf3a27351d96d117442be494a1b4cee27564756764695c938534385ac96f732771b039bd40b0eb408177c98c9c28e7776894fca4f92555c121014064f5149cbe9b5a845abcb4e5e4ee6b6f122d0bb8324a437c38c8d971854c9ddcfbdcb66c17a1fcb4f6ffe0ef50bc4e54aab677d401ff3eff9ce16ea55676fe2fce8f0cb177a5b5f9416586ffcb47d1c65fe73d3150ed377c6895ac7c39bd936481fab26ccf87b286c6ea933988f0c6fa63931a21302f9beb4b07757018841cc54b06a4b2797902996954a44c00821151b741438ca3", 0xe0}], 0x1) readv(r1, &(0x7f0000000580)=[{&(0x7f00000001c0)=""/139, 0x8b}, {&(0x7f00000002c0)=""/192, 0xc0}, {&(0x7f0000000380)=""/71, 0x47}, {&(0x7f0000000400)=""/225, 0xe1}, {&(0x7f0000000540)=""/52, 0x34}], 0x5) (async) readv(r1, &(0x7f0000000580)=[{&(0x7f00000001c0)=""/139, 0x8b}, {&(0x7f00000002c0)=""/192, 0xc0}, {&(0x7f0000000380)=""/71, 0x47}, {&(0x7f0000000400)=""/225, 0xe1}, {&(0x7f0000000540)=""/52, 0x34}], 0x5) mmap(&(0x7f0000ff9000/0x4000)=nil, 0x4000, 0x1, 0x2010, r1, 0x766aa000) sendfile(r1, r0, &(0x7f00000000c0)=0x8, 0x5) (async) sendfile(r1, r0, &(0x7f00000000c0)=0x8, 0x5) [ 1999.093733][T29136] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 
[ 1999.162858][T29136] bond992: entered promiscuous mode [ 1999.178578][T29136] 8021q: adding VLAN 0 to HW filter on device bond992 [ 1999.274842][T29139] bond992: (slave bridge950): making interface the new active one [ 1999.285346][T29139] bridge950: entered promiscuous mode [ 1999.304292][T29139] bond992: (slave bridge950): Enslaving as an active interface with an up link 01:55:49 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x40000, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:49 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={0x0, 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1999.431627][T29146] netlink: 'syz-executor.2': attribute type 1 has an invalid length. [ 1999.506584][T29146] bond938: entered promiscuous mode [ 1999.524225][T29146] 8021q: adding VLAN 0 to HW filter on device bond938 [ 1999.645872][T29147] bond938: (slave bridge905): making interface the new active one [ 1999.654728][T29147] bridge905: entered promiscuous mode 01:55:49 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x32c, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 1999.702835][T29147] bond938: (slave bridge905): Enslaving as an active interface with an up link [ 1999.719229][T29153] netlink: 'syz-executor.1': attribute type 1 has an invalid length. 
[ 1999.806102][T29153] bond537: entered promiscuous mode [ 1999.818521][T29153] 8021q: adding VLAN 0 to HW filter on device bond537 [ 1999.856911][T29159] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 01:55:49 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket$netlink(0x10, 0x3, 0x0) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000280)=ANY=[@ANYBLOB="4c0000001000090800"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00a0fe6dbb59fafbd480dd116a69dc348e7b581cda5e7332193a4cde406786e1ae1896fbb78dcf93c76c4b334671a5725b81d5f0091793ff3617ce6978a1d9195c3316475f2cebd9077b7dba3ba369504dcff425ce47598cad9c9369aca2c5f0114a8a87324f8bc5979fc9ca7361ea739c6273434013bd7e1c", @ANYRES32=r4, @ANYBLOB], 0x4c}}, 0x0) r5 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r5, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)=@getnetconf={0x14, 0x52, 0x801, 0x70bd27, 0x25dfdbfd, {}, ["", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4000001) socket$inet6_tcp(0xa, 0x1, 0x0) (async) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) (async) listen(r0, 0x0) (async) accept4(r0, 0x0, 0x0, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket$netlink(0x10, 0x3, 0x0) (async) socket(0x10, 0x803, 0x0) (async) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) (async) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) (async) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) (async) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000280)=ANY=[@ANYBLOB="4c0000001000090800"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00a0fe6dbb59fafbd480dd116a69dc348e7b581cda5e7332193a4cde406786e1ae1896fbb78dcf93c76c4b334671a5725b81d5f0091793ff3617ce6978a1d9195c3316475f2cebd9077b7dba3ba369504dcff425ce47598cad9c9369aca2c5f0114a8a87324f8bc5979fc9ca7361ea739c6273434013bd7e1c", @ANYRES32=r4, @ANYBLOB], 0x4c}}, 0x0) (async) socket$nl_route(0x10, 0x3, 0x0) (async) sendmsg$nl_route(r5, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)=@getnetconf={0x14, 0x52, 0x801, 0x70bd27, 0x25dfdbfd, {}, ["", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4000001) (async) [ 1999.976732][T29159] bond1045: entered promiscuous mode [ 2000.008365][T29159] 8021q: adding VLAN 0 to HW filter on device bond1045 [ 2000.184485][T29163] bond1045: (slave bridge1009): 
making interface the new active one [ 2000.196498][T29163] bridge1009: entered promiscuous mode [ 2000.219385][T29163] bond1045: (slave bridge1009): Enslaving as an active interface with an up link 01:55:49 executing program 0: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x608a, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 2000.229961][T29167] netlink: 'syz-executor.3': attribute type 1 has an invalid length. 01:55:50 executing program 3: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x104cac, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) 01:55:50 executing program 4: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x262, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 2000.259445][T29167] workqueue: Failed to create a rescuer kthread for wq "bond993": -EINTR [ 2000.456864][T29176] netlink: 'syz-executor.2': attribute type 1 has an invalid length. 
[ 2000.579246][T29176] bond939: entered promiscuous mode [ 2000.594082][T29176] 8021q: adding VLAN 0 to HW filter on device bond939 [ 2000.719802][T29178] bond939: (slave bridge906): making interface the new active one [ 2000.749962][T29178] bridge906: entered promiscuous mode 01:55:50 executing program 2: r0 = socket$netlink(0x10, 0x3, 0x0) r1 = socket$netlink(0x10, 0x3, 0x0) r2 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r2, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r2, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r1, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r3, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r0, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000400)=@newlink={0x4c, 0x10, 0xffffff1f, 0x0, 0x330, {}, [@IFLA_LINKINFO={0x24, 0x12, 0x0, 0x1, @bridge={{0xb}, {0x14, 0x2, 0x0, 0x1, [@IFLA_BR_NF_CALL_ARPTABLES={0x5}, @IFLA_BR_STP_STATE={0x8}]}}}, @IFLA_MASTER={0x8, 0xa, r3}]}, 0x4c}}, 0x0) [ 2000.789742][T29178] bond939: (slave bridge906): Enslaving as an active interface with an up link 01:55:50 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r1 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r1, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) write$binfmt_script(r1, &(0x7f0000000100)={'#! ', './file0', [{0x20, '#! 
'}], 0xa, "f79fd3f8082f56b97231a74026524045a5b7156e0617959231d1f2be42245b930f49c35ac9306b14271be388aa354be900c03a70cf4170f554a2ae721bd77cc2138402852bd173fc53b564de88a070b3af71a87f03065b21ac7069690efc8bb3157bca942b2c778a0f31de81208fdd678a80b5c787c78a42618519a6f6055222331d"}, 0x91) 01:55:50 executing program 1: r0 = socket$inet6_tcp(0xa, 0x1, 0x0) bind$inet6(r0, &(0x7f00000003c0)={0xa, 0x4e22}, 0x1c) listen(r0, 0x0) accept4(r0, 0x0, 0x0, 0x0) (async) r1 = socket$netlink(0x10, 0x3, 0x0) (async) r2 = socket$netlink(0x10, 0x3, 0x0) (async) r3 = socket(0x10, 0x803, 0x0) sendmsg$NL80211_CMD_CRIT_PROTOCOL_START(r3, &(0x7f0000000580)={0x0, 0x0, &(0x7f0000000540)={0x0, 0x1c}}, 0x0) getsockname$packet(r3, &(0x7f0000000600)={0x11, 0x0, 0x0, 0x1, 0x0, 0x6, @broadcast}, &(0x7f0000000080)=0x14) sendmsg$nl_route(r2, &(0x7f0000000040)={0x0, 0x0, &(0x7f0000000000)={&(0x7f00000000c0)=ANY=[@ANYBLOB="3c00000010008506eb9afc4cd8d06e754a0081c5", @ANYRES32=r4, @ANYBLOB="2377f292252155b21c0012000c000100626f6e64000000000c0002000800010001"], 0x3c}}, 0x0) sendmsg$nl_route(r1, &(0x7f0000000240)={0x0, 0x0, &(0x7f0000000680)={&(0x7f0000000280)=ANY=[@ANYBLOB="4c0000001000090800"/20, @ANYRES32=0x0, @ANYBLOB="0000000000000000240012800b0001006272696467650000140002800500260000000000080005000000000008000a00a0fe6dbb59fafbd480dd116a69dc348e7b581cda5e7332193a4cde406786e1ae1896fbb78dcf93c76c4b334671a5725b81d5f0091793ff3617ce6978a1d9195c3316475f2cebd9077b7dba3ba369504dcff425ce47598cad9c9369aca2c5f0114a8a87324f8bc5979fc9ca7361ea739c6273434013bd7e1c", @ANYRES32=r4, @ANYBLOB], 0x4c}}, 0x0) (async) r5 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route(r5, &(0x7f00000001c0)={&(0x7f0000000100)={0x10, 0x0, 0x0, 0x4000}, 0xc, &(0x7f0000000180)={&(0x7f0000000140)=@getnetconf={0x14, 0x52, 0x801, 0x70bd27, 0x25dfdbfd, {}, ["", "", "", ""]}, 0x14}, 0x1, 0x0, 0x0, 0x4001}, 0x4000001) [ 2000.833097][T29182] netlink: 'syz-executor.1': attribute type 1 has an invalid length. [ 2000.847943][T29182] workqueue: Failed to create a rescuer kthread for wq "bond538": -EINTR [ 2000.849222][T29189] netlink: 'syz-executor.0': attribute type 1 has an invalid length. 01:55:50 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f00000000c0)={'#! ', './file0'}, 0xb) r1 = socket$nl_route(0x10, 0x3, 0x0) sendmsg$nl_route_sched(r1, &(0x7f000000c140)={0x0, 0x0, &(0x7f00000002c0)={&(0x7f00000007c0)=ANY=[@ANYBLOB="280029aa2c332d5bea9bad39bf59452a34d9d8000030a2b55582f5fd1e6fdfdec12aa9c829dedc5b40cb985464b366067f5b3489057398384641c619000100be085754872b39582adc020000006486764c95b1a0d000c4c30300112b7cd9776bf684254a0625a1f185eefcd79e03ee1710673a0a30af7c8713cbeef788109ce3a57a779d05208325ffaaba742b71c071c7597d4c55bc07b4fa208fef7e6db9f1bef859d7360beea439da5b6cd2e3916253d9a88d84eb1121f46d4882db8bd52f042df9def409cd02dfcc0e1f8707ff433700b14433fd9e9d21e674241f64c91664afe4edb300cdccd800a51c5d1a5b7ac7200d62ebf9947ed9d4e50cd6b98e93e536d65f5436fe2000000080fbe4ec0846896a11a1a265487beef0048e040000005a4490a91e4c32f69a0f7f9d8872d3fbbd96f2f280fd1ae1409438e5c5fff965c35fb8af18e83f5cf8581035a6e3986840fed343"], 0x28}}, 0x0) write$binfmt_script(r1, &(0x7f0000000100)={'#! ', './file0', [{0x20, '#! 
'}], 0xa, "f79fd3f8082f56b97231a74026524045a5b7156e0617959231d1f2be42245b930f49c35ac9306b14271be388aa354be900c03a70cf4170f554a2ae721bd77cc2138402852bd173fc53b564de88a070b3af71a87f03065b21ac7069690efc8bb3157bca942b2c778a0f31de81208fdd678a80b5c787c78a42618519a6f6055222331d"}, 0x91) 01:55:50 executing program 5: r0 = openat$cgroup_ro(0xffffffffffffff9c, &(0x7f0000000080)='memory.events\x00', 0x275a, 0x0) write$binfmt_script(r0, &(0x7f0 VM DIAGNOSIS: Warning: Permanently added '10.128.0.56' (ED25519) to the list of known hosts. lock-classes: 6474 [max: 8192] direct dependencies: 50376 [max: 131072] indirect dependencies: 1017762 all direct dependencies: 2553554 dependency chains: 261684 [max: 262144] dependency chain hlocks used: 1310720 [max: 1310720] dependency chain hlocks lost: 0 in-hardirq chains: 108 in-softirq chains: 3157 in-process chains: 258418 stack-trace entries: 328485 [max: 1048576] number of stack traces: 16691 number of stack hash chains: 10473 combined max dependencies:hardirq-safe locks: 62 hardirq-unsafe locks: 5702 softirq-safe locks: 369 softirq-unsafe locks: 5266 irq-safe locks: 377 irq-unsafe locks: 5702 hardirq-read-safe locks: 4 hardirq-read-unsafe locks: 224 softirq-read-safe locks: 24 softirq-read-unsafe locks: 203 irq-read-safe locks: 24 irq-read-unsafe locks: 224 uncategorized locks: 423 unused locks: 0 max locking depth: 19 max bfs queue depth: 718 max lock class index: 6473 debug_locks: 0 zapped classes: 1740 zapped lock chains: 4762 large chain blocks: 0 all lock classes: FD: 37 BD: 1 +.+.: fill_pool_map-wait-type-override ->pool_lock#2 ->pool_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 2 BD: 5170 -.-.: &obj_hash[i].lock ->pool_lock FD: 1 BD: 5166 -.-.: pool_lock FD: 879 BD: 15 +.+.: cgroup_mutex ->pcpu_alloc_mutex ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&obj_hash[i].lock ->cgroup_file_kn_lock ->css_set_lock ->&c->lock ->&____s->seqcount ->blkcg_pol_mutex ->&n->list_lock ->&zone->lock ->percpu_counters_lock ->shrinker_rwsem ->&base->lock ->batched_entropy_u8.lock ->&pgdat->memcg_lru.lock ->devcgroup_mutex ->cpu_hotplug_lock ->fs_reclaim ->&rq->__lock ->cgroup_rstat_lock ->cpuset_mutex ->&dom->lock ->kfence_freelist_lock ->batched_entropy_u32.lock ->cgroup_idr_lock ->task_group_lock ->(wq_completion)cpuset_migrate_mm ->&wq->mutex ->&____s->seqcount#2 ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->cgroup_mutex.wait_lock ->remove_cache_srcu ->stock_lock ->krc.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4463 -.-.: (console_sem).lock FD: 219 BD: 13 +.+.: console_lock ->console_owner_lock ->resource_lock ->pool_lock#2 ->&obj_hash[i].lock ->&zone->lock ->&____s->seqcount ->&c->lock ->kbd_event_lock ->vga_lock ->(console_sem).lock ->fs_reclaim ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#11 ->&fb_info->lock ->&base->lock ->&rq->__lock ->subsys mutex#5 ->&helper->lock ->&helper->damage_lock ->&lock->wait_lock ->&p->pi_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->vt_event_lock ->&meta->lock FD: 1 BD: 11 ....: console_srcu FD: 283 BD: 143 ++++: cpu_hotplug_lock ->jump_label_mutex 
->static_call_mutex ->cpuhp_state_mutex ->wq_pool_mutex ->freezer_mutex ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) ->smpboot_threads_lock ->&obj_hash[i].lock ->&pool->lock ->&x->wait#4 ->&rq->__lock ->mem_hotplug_lock ->mem_hotplug_lock.waiters.lock ->mem_hotplug_lock.rss.gp_wait.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->rcu_node_0 ->&swhash->hlist_mutex ->pmus_lock ->pcp_batch_high_lock ->&xa->xa_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_attach_mutex ->pcpu_alloc_mutex ->relay_channels_mutex ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->text_mutex ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->(console_sem).lock ->rtc_lock ->sparse_irq_lock ->&x->wait#6 ->cpuhp_state-up ->stop_cpus_mutex ->&wq->mutex ->remove_cache_srcu ->flush_lock ->&md->mutex ->&irq_desc_lock_class ->xps_map_mutex ->&cfs_rq->removed.lock ->css_set_lock ->cpuset_mutex ->cgroup_threadgroup_rwsem ->cgroup_threadgroup_rwsem.waiters.lock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&list->lock#5 ->(work_completion)(flush) ->&x->wait#10 ->jump_label_mutex.wait_lock FD: 60 BD: 150 +.+.: jump_label_mutex ->text_mutex ->&rq->__lock ->text_mutex.wait_lock ->&p->pi_lock ->jump_label_mutex.wait_lock FD: 59 BD: 144 +.+.: static_call_mutex ->text_mutex ->text_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 58 BD: 157 +.+.: text_mutex ->ptlock_ptr(page)#2 ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->text_mutex.wait_lock ->&pool->lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 225 BD: 10 +.+.: console_mutex ->syslog_lock ->(console_sem).lock ->&port_lock_key ->console_lock ->console_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->console_srcu ->&root->kernfs_rwsem ->kernfs_notify_lock ->&rq->__lock FD: 28 BD: 11 +.+.: syslog_lock ->&rq->__lock FD: 1 BD: 4463 -.-.: console_owner_lock FD: 38 BD: 4462 -.-.: console_owner ->console_owner_lock ->&port_lock_key FD: 1 BD: 165 ..-.: input_pool.lock FD: 217 BD: 144 +.+.: cpuhp_state_mutex ->cpuhp_state-down ->cpuhp_state-up ->&p->pi_lock ->&x->wait#6 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->&c->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->&zone->lock ->crypto_alg_sem ->scomp_lock FD: 285 BD: 1 +.+.: clocksource_mutex ->watchdog_lock ->cpu_hotplug_lock ->(console_sem).lock ->&rq->__lock FD: 1 BD: 2 ....: watchdog_lock FD: 4 BD: 160 ++++: resource_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 ....: cache_disable_lock FD: 1 BD: 4312 +.+.: pgd_lock FD: 7 BD: 309 +.+.: init_mm.page_table_lock ->pgd_lock ->&obj_hash[i].lock FD: 1 BD: 1 ....: early_pfn_lock FD: 168 BD: 1 +.+.: acpi_ioapic_lock ->ioapic_lock ->(console_sem).lock ->ioapic_mutex FD: 2 BD: 164 ....: ioapic_lock ->i8259A_lock FD: 1 BD: 1 +.+.: syscore_ops_lock FD: 1 BD: 1 ....: map_entries_lock FD: 1 BD: 7 ....: devtree_lock FD: 3 BD: 4393 ..-.: pcpu_lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 130 BD: 73 +.+.: param_lock ->rate_ctrl_mutex ->disk_events_mutex FD: 1 BD: 5128 ..-.: base_crng.lock FD: 1 BD: 1 ....: rcu_read_lock FD: 1 BD: 1 ....: crng_init_wait.lock FD: 2 BD: 1 ....: zonelist_update_seq ->zonelist_update_seq.seqcount FD: 1 BD: 2 ....: zonelist_update_seq.seqcount FD: 1 BD: 1 +.+.: dmar_global_lock FD: 2 BD: 5030 -.-.: &zone->lock ->&____s->seqcount FD: 1 BD: 5104 .-.-: &____s->seqcount FD: 3 BD: 4429 +.+.: &pcp->lock ->&zone->lock FD: 1 BD: 5193 -.-.: pool_lock#2 FD: 134 BD: 224 +.+.: pcpu_alloc_mutex 
->pcpu_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&c->lock ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock ->pcpu_alloc_mutex.wait_lock ->&pool->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&n->list_lock ->&____s->seqcount#2 ->purge_vmap_area_lock ->remove_cache_srcu FD: 6 BD: 5082 -.-.: &n->list_lock ->&c->lock FD: 5 BD: 5121 -.-.: &c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 157 BD: 85 +.+.: slab_mutex ->pool_lock#2 ->&c->lock ->&n->list_lock ->pcpu_alloc_mutex ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->fs_reclaim ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->lock ->&root->kernfs_rwsem ->&k->list_lock FD: 3 BD: 5 ....: batched_entropy_u64.lock ->crngs.lock FD: 2 BD: 5127 ..-.: crngs.lock ->base_crng.lock FD: 4 BD: 1 ....: espfix_init_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4289 ..-.: percpu_counters_lock FD: 7 BD: 4393 +.+.: &mm->page_table_lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 33 BD: 4395 +.+.: ptlock_ptr(page) ->lock#4 FD: 54 BD: 4414 +.+.: ptlock_ptr(page)#2 ->lock#4 ->ptlock_ptr(page)#2/1 ->key ->&____s->seqcount ->pool_lock#2 ->lock#5 ->&zone->lock ->&folio_wait_table[i] ->&lruvec->lru_lock ->&mapping->private_lock ->&obj_hash[i].lock ->&pgdat->kswapd_wait FD: 132 BD: 3 +.+.: trace_types_lock ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&obj_hash[i].lock FD: 1 BD: 1 ....: panic_notifier_list.lock FD: 1 BD: 1 ....: die_chain.lock FD: 134 BD: 4 +.+.: trace_event_sem ->trace_event_ida.xa_lock ->&rq->__lock ->trace_event_sem.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 3 BD: 4029 ..-.: batched_entropy_u32.lock ->crngs.lock FD: 27 BD: 4961 -.-.: &rq->__lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock/1 ->&cfs_rq->removed.lock ->&rt_b->rt_runtime_lock ->&cp->lock ->pool_lock#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->cid_lock FD: 1 BD: 4962 ....: &cfs_b->lock FD: 28 BD: 149 ....: init_task.pi_lock ->&rq->__lock FD: 1 BD: 1 ....: init_task.vtime_seqcount FD: 136 BD: 148 +.+.: wq_pool_mutex ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&wq->mutex ->&obj_hash[i].lock ->&pool->lock/1 ->fs_reclaim ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->wq_pool_attach_mutex ->(console_sem).lock ->quarantine_lock ->&xa->xa_lock ->&n->list_lock ->&____s->seqcount#2 ->rcu_node_0 ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 33 BD: 161 +.+.: &wq->mutex ->&pool->lock ->&pool->lock/1 ->&x->wait#10 ->&rq->__lock FD: 30 BD: 4666 -.-.: &pool->lock ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->wq_mayday_lock FD: 32 BD: 4516 ..-.: &pool->lock/1 ->&obj_hash[i].lock ->&p->pi_lock ->pool_lock#2 ->&base->lock ->&x->wait#10 ->wq_mayday_lock FD: 128 BD: 59 ++++: shrinker_rwsem ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount ->fs_reclaim ->&rq->__lock ->&obj_hash[i].lock ->krc.lock ->rcu_node_0 ->f2fs_list_lock ->tk_core.seq.seqcount ->&sbi->s_es_lock ->&journal->j_list_lock FD: 1 BD: 4474 -.-.: rcu_node_0 FD: 4 BD: 77 -.-.: rcu_state.barrier_lock ->rcu_node_0 
->&obj_hash[i].lock FD: 31 BD: 3 ....: &rnp->exp_poll_lock FD: 9 BD: 5 ....: trace_event_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock FD: 1 BD: 1 ....: trigger_cmd_mutex FD: 1 BD: 165 ....: i8259A_lock FD: 128 BD: 145 +.+.: irq_domain_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 11 BD: 308 +.+.: free_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->init_mm.page_table_lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 307 +.+.: vmap_area_lock FD: 7 BD: 161 -.-.: &irq_desc_lock_class ->i8259A_lock ->vector_lock ->ioapic_lock ->mask_lock ->tmp_mask_lock ->irq_resend_lock FD: 37 BD: 86 +.+.: vmap_purge_lock ->purge_vmap_area_lock ->free_vmap_area_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 8 BD: 233 +.+.: purge_vmap_area_lock ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 2 BD: 85 +.+.: cpa_lock ->pgd_lock FD: 5 BD: 2 -.-.: timekeeper_lock ->tk_core.seq.seqcount ->pvclock_gtod_data FD: 4 BD: 5032 ----: tk_core.seq.seqcount ->&obj_hash[i].lock ->pvclock_gtod_data FD: 12 BD: 5040 -.-.: &base->lock ->&obj_hash[i].lock FD: 176 BD: 146 +.+.: pmus_lock ->pcpu_alloc_mutex ->pool_lock#2 ->&obj_hash[i].lock ->&cpuctx_mutex ->fs_reclaim ->&k->list_lock ->lock ->&root->kernfs_rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&x->wait#9 ->bus_type_sem ->&c->lock ->&zone->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#29 FD: 1 BD: 146 +.+.: &swhash->hlist_mutex FD: 1 BD: 147 +.+.: &cpuctx_mutex FD: 1 BD: 2 ....: tty_ldiscs_lock FD: 2 BD: 14 ....: kbd_event_lock ->led_lock FD: 1 BD: 15 ..-.: led_lock FD: 1 BD: 14 ....: vga_lock FD: 36 BD: 4467 -.-.: &port_lock_key ->&dev->power.lock ->&port->lock ->&tty->write_wait FD: 3 BD: 11 ....: console_srcu_srcu_usage.lock ->&obj_hash[i].lock FD: 1 BD: 40 ..-.: &ACCESS_PRIVATE(sdp, lock) FD: 44 BD: 4 +.+.: init_task.alloc_lock ->init_fs.lock FD: 15 BD: 1 +.+.: acpi_ioremap_lock ->pool_lock#2 ->resource_lock ->memtype_lock ->free_vmap_area_lock ->vmap_area_lock FD: 1 BD: 2 +.+.: memtype_lock FD: 1 BD: 17 ....: semaphore->lock FD: 1 BD: 13 ....: *(&acpi_gbl_reference_count_lock) FD: 9 BD: 1 ....: clockevents_lock ->tk_core.seq.seqcount ->tick_broadcast_lock ->i8253_lock FD: 3 BD: 2 -...: tick_broadcast_lock ->jiffies_lock FD: 1 BD: 2 ....: i8253_lock FD: 21 BD: 12 +.+.: &desc->request_mutex ->&irq_desc_lock_class ->proc_subdir_lock ->&ent->pde_unload_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 167 BD: 2 +.+.: ioapic_mutex ->&domain->mutex FD: 166 BD: 147 +.+.: &domain->mutex ->pool_lock#2 ->vector_lock ->&irq_desc_lock_class ->i8259A_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->sparse_irq_lock ->fs_reclaim ->&n->list_lock FD: 1 BD: 164 -.-.: vector_lock FD: 1 BD: 1 +.+.: &pool->lock#2 FD: 2 BD: 3 -.-.: jiffies_lock ->jiffies_seq.seqcount FD: 1 BD: 4 -.-.: jiffies_seq.seqcount FD: 16 BD: 5003 -.-.: hrtimer_bases.lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 29 BD: 1 -.-.: log_wait.lock ->&p->pi_lock FD: 284 BD: 2 +.+.: spec_ctrl_mutex ->cpu_hotplug_lock ->(console_sem).lock ->&rq->__lock FD: 2 BD: 1 +.-.: drivers/char/random.c:1010 ->input_pool.lock FD: 31 BD: 4489 +.+.: sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&x->wait#27 FD: 38 BD: 2 +.+.: tomoyo_policy_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&c->lock ->&zone->lock 
->&____s->seqcount ->&n->list_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu FD: 2 BD: 1 ....: aa_secids.xa_lock ->pool_lock#2 FD: 1 BD: 2 +.+.: aa_buffers_lock FD: 1017 BD: 4 ++++: pernet_ops_rwsem ->stack_depot_init_mutex ->crngs.lock ->net_rwsem ->proc_inum_ida.xa_lock ->pool_lock#2 ->proc_subdir_lock ->fs_reclaim ->&____s->seqcount ->&c->lock ->sysctl_lock ->pcpu_alloc_mutex ->net_generic_ids.xa_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->&obj_hash[i].lock ->k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK ->nl_table_lock ->nl_table_wait.lock ->rtnl_mutex ->&zone->lock ->uevent_sock_mutex ->&net->rules_mod_lock ->slab_mutex ->batched_entropy_u32.lock ->percpu_counters_lock ->cache_list_lock ->tk_core.seq.seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&pool->lock/1 ->running_helpers_waitq.lock ->&rq->__lock ->&sn->pipefs_sb_lock ->krc.lock ->&s->s_inode_list_lock ->nf_hook_mutex ->cpu_hotplug_lock ->hwsim_netgroup_ida.xa_lock ->nf_connlabels_lock ->nf_ct_ecache_mutex ->nf_log_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->ipvs->est_mutex ->&base->lock ->__ip_vs_app_mutex ->&hashinfo->lock#2 ->&net->ipv6.ip6addrlbl_table.lock ->(console_sem).lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->wq_pool_mutex ->pcpu_lock ->&list->lock#4 ->&dir->lock#2 ->ptype_lock ->k-clock-AF_TIPC ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&this->receive_lock ->once_lock ->&cfs_rq->removed.lock ->nf_ct_proto_mutex ->k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->conn_lock ->&call->waitq ->&rx->call_lock ->&rxnet->call_lock ->&n->list_lock ->pool_lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->rdma_nets.xa_lock ->devices_rwsem ->uevent_sock_mutex.wait_lock ->remove_cache_srcu ->stock_lock ->&____s->seqcount#2 ->rcu_node_0 ->&net->nsid_lock ->ebt_mutex ->&xt[i].mutex ->&nft_net->commit_mutex ->netns_bpf_mutex ->&rnp->exp_wq[0] ->(&net->fs_probe_timer) ->&net->cells_lock ->(&net->cells_timer) ->bit_wait_table + i ->(&net->fs_timer) ->(wq_completion)kafsd ->&wq->mutex ->k-clock-AF_RXRPC ->&local->services_lock ->(wq_completion)krxrpcd ->rlock-AF_RXRPC ->&x->wait ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&ent->pde_unload_lock ->ovs_mutex ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->(work_completion)(&ovs_net->dp_notify_work) ->&srv->idr_lock ->&rnp->exp_wq[3] ->(work_completion)(&tn->work) ->&tn->nametbl_lock ->&rnp->exp_wq[1] ->&rnp->exp_wq[2] ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&c->work)->work) ->(wq_completion)krdsd ->(work_completion)(&rtn->rds_tcp_accept_w) ->rds_tcp_conn_lock ->loop_conns_lock ->(wq_completion)l2tp ->rcu_state.barrier_mutex ->(&rxnet->peer_keepalive_timer) ->(work_completion)(&rxnet->peer_keepalive_work) ->(&rxnet->service_conn_reap_timer) ->&x->wait#10 ->dev_base_lock ->lweventlist_lock ->napi_hash_lock ->netdev_unregistering_wq.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&fn->fou_lock ->ipvs->sync_mutex ->hwsim_radio_lock ->rdma_nets_rwsem ->k-clock-AF_NETLINK ->&nlk->wait ->wlock-AF_NETLINK ->&hn->hn_lock ->&pnettable->lock ->&pnetids_ndev->lock ->k-sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->&rcu_state.expedited_wq ->k-sk_lock-AF_INET ->k-slock-AF_INET ->&sn->gssp_lock ->&cd->hash_lock ->(&net->can.stattimer) ->xfrm_state_gc_work ->&net->xfrm.xfrm_state_lock ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->ip6_fl_lock ->(&net->ipv6.ip6_fib_timer) ->__ip_vs_mutex ->(&ipvs->dest_trash_timer) 
->(work_completion)(&(&ipvs->expire_nodest_conn_work)->work) ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&ipvs->est_reload_work)->work) ->nfnl_subsys_ipset ->recent_lock ->hashlimit_mutex ->trans_gc_work ->(work_completion)(&(&cnet->ecache.dwork)->work) ->quarantine_lock ->sysfs_symlink_target_lock ->kernfs_idr_lock ->tcp_metrics_lock ->k-clock-AF_INET ->(work_completion)(&net->xfrm.policy_hash_work) ->&net->xfrm.xfrm_policy_lock ->(work_completion)(&net->xfrm.state_hash_work) ->&list->lock#2 ->&xa->xa_lock#3 ->genl_sk_destructing_waitq.lock ->rcu_state.exp_mutex.wait_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->&meta->lock ->&sem->wait_lock ->rcu_state.barrier_mutex.wait_lock ->pgd_lock ->key ->stack_depot_init_mutex.wait_lock ->&device->compat_devs_mutex ->dev_pm_qos_sysfs_mtx ->subsys mutex#84 ->&x->wait#9 ->dpm_list_mtx ->&dev->power.lock ->deferred_probe_mutex ->device_links_lock ->gdp_mutex ->&device->unregistration_lock ->devices_rwsem.wait_lock ->(&timer.timer) ->pcpu_alloc_mutex.wait_lock ->rdma_nets_rwsem.wait_lock ->&lock->wait_lock ->dev_pm_qos_sysfs_mtx.wait_lock FD: 29 BD: 75 +.+.: stack_depot_init_mutex ->&rq->__lock ->stack_depot_init_mutex.wait_lock FD: 148 BD: 3781 ++++: net_rwsem ->&list->lock#2 ->&rq->__lock ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->quarantine_lock ->&n->list_lock ->&table->lock#4 ->&____s->seqcount ->&ndev->lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 11 BD: 94 ....: proc_inum_ida.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 870 BD: 71 +.+.: rtnl_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->pcpu_alloc_mutex ->&xa->xa_lock#3 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&rq->__lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->nl_table_lock ->nl_table_wait.lock ->net_rwsem ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->krc.lock ->stack_depot_init_mutex ->cpu_hotplug_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_mutex ->crngs.lock ->&pool->lock/1 ->&cfs_rq->removed.lock ->lweventlist_lock ->&pool->lock ->rtnl_mutex.wait_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->pool_lock ->&k->k_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->param_lock ->(console_sem).lock ->&rdev->wiphy.mtx ->&base->lock ->subsys mutex#55 ->&sdata->sec_mtx ->&local->iflist_mtx#2 ->&n->list_lock ->lock#7 ->failover_lock ->&tn->lock ->&idev->mc_lock ->quarantine_lock ->&ndev->lock ->rcu_node_0 ->&pnettable->lock ->smc_ib_devices.mutex ->&(&net->nexthop.notifier_chain)->rwsem ->reg_requests_lock ->reg_pending_beacons_lock ->rlock-AF_NETLINK ->(inetaddr_validator_chain).rwsem ->(inetaddr_chain).rwsem ->_xmit_LOOPBACK ->netpoll_srcu ->&in_dev->mc_tomb_lock ->&im->lock ->fib_info_lock ->cbs_list_lock ->(inet6addr_validator_chain).rwsem ->&net->ipv6.addrconf_hash_lock ->&ifa->lock ->&tb->tb6_lock ->&dev_addr_list_lock_key ->napi_hash_lock ->lapb_list_lock ->x25_neigh_list_lock ->console_owner_lock ->console_owner ->_xmit_ETHER ->_xmit_SLIP ->&sem->wait_lock ->&vi->refill_lock ->noop_qdisc.q.lock ->remove_cache_srcu ->&rfkill->lock ->&local->chanctx_mtx ->&dev->tx_global_lock ->&rnp->exp_wq[2] ->&sch->q.lock ->class ->(&tbl->proxy_timer) ->_xmit_VOID ->_xmit_X25 
->&lapbeth->up_lock ->&lapb->lock ->&rnp->exp_wq[0] ->&dir->lock ->&ul->lock#2 ->&n->lock ->dev_addr_sem ->_xmit_IEEE802154 ->reg_indoor_lock ->&meta->lock ->&nr_netdev_addr_lock_key ->listen_lock ->uevent_sock_mutex.wait_lock ->&r->consumer_lock ->&mm->mmap_lock ->pcpu_lock ->(switchdev_blocking_notif_chain).rwsem ->&br->hash_lock ->nf_hook_mutex ->j1939_netdev_lock ->&bat_priv->tvlv.handler_list_lock ->&bat_priv->tvlv.container_list_lock ->&bat_priv->softif_vlan_list_lock ->key#16 ->&bat_priv->tt.changes_list_lock ->kernfs_idr_lock ->&rnp->exp_wq[3] ->tk_core.seq.seqcount ->&wq->mutex ->init_lock ->&rnp->exp_wq[1] ->deferred_lock ->target_list_lock ->&br->lock ->&pn->hash_lock ->&rcu_state.expedited_wq ->team->team_lock_key ->&hard_iface->bat_iv.ogm_buff_mutex ->ptype_lock ->_xmit_NONE ->lock#9 ->team->team_lock_key#2 ->team->team_lock_key#3 ->team->team_lock_key#4 ->team->team_lock_key#5 ->team->team_lock_key#6 ->&hsr->list_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->mount_lock ->&xa->xa_lock#14 ->&dev_addr_list_lock_key#3/1 ->req_lock ->&x->wait#11 ->subsys mutex#82 ->bpf_devs_lock ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->&devlink_port->type_lock ->&vn->sock_lock ->devnet_rename_sem ->&nft_net->commit_mutex ->&ent->pde_unload_lock ->&wg->device_update_lock ->_xmit_SIT ->&bridge_netdev_addr_lock_key/1 ->_xmit_TUNNEL ->_xmit_IPGRE ->_xmit_TUNNEL6 ->&dev_addr_list_lock_key/1 ->&dev_addr_list_lock_key#2/1 ->_xmit_ETHER/1 ->&nn->netlink_tap_lock ->&batadv_netdev_addr_lock_key/1 ->pgd_lock ->key ->percpu_counters_lock ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&ipvlan->addrs_lock ->&macsec_netdev_addr_lock_key/1 ->key#20 ->&bat_priv->tt.commit_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET ->k-slock-AF_INET ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&lock->wait_lock ->&ul->lock ->&____s->seqcount#2 ->stock_lock ->&net->xdp.lock ->mirred_list_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->&pnn->pndevs.lock ->&pnn->routes.lock ->dev_pm_qos_sysfs_mtx ->deferred_probe_mutex ->device_links_lock ->rcu_state.exp_mutex.wait_lock ->&rnp->exp_lock ->&caifn->caifdevs.lock ->pcpu_alloc_mutex.wait_lock ->&net->rules_mod_lock ->(&mrt->ipmr_expire_timer) ->(work_completion)(&ht->run_work) ->&ht->mutex ->nf_connlabels_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&tun->lock ->wlock-AF_UNSPEC ->elock-AF_UNSPEC ->&hwstats->hwsdev_list_lock ->qdisc_mod_lock ->&block->lock ->&block->cb_lock ->&chain->filter_chain_lock ->cls_mod_lock ->act_mod_lock ->&tn->idrinfo->lock ->&p->tcfa_lock ->&block->proto_destroy_lock ->&bridge_netdev_addr_lock_key ->&p->alloc_lock ->rcu_state.exp_mutex ->&pn->all_ppp_mutex ->&ppp->rlock ->&ppp->wlock ->&dev_addr_list_lock_key#4 ->&pf->rwait ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->__ip_vs_mutex ->flowtable_lock ->&tn->idrinfo->lock#2 ->zones_mutex ->flow_indr_block_lock ->(&q->adapt_timer) ->&list->lock#2 ->(&brmctx->ip4_mc_router_timer) ->(&brmctx->ip4_other_query.timer) ->(&brmctx->ip4_own_query.timer) ->(&brmctx->ip6_mc_router_timer) ->(&brmctx->ip6_other_query.timer) ->(&brmctx->ip6_own_query.timer) ->(work_completion)(&(&br->gc_work)->work) ->&br->multicast_lock ->(work_completion)(&br->mcast_gc_work) ->rcu_state.barrier_mutex ->mrt_lock ->&net->ipv4.ra_mutex ->sk_lock-AF_INET6 ->slock-AF_INET6 ->sk_lock-AF_INET ->slock-AF_INET ->&dev_addr_list_lock_key#2 ->team->team_lock_key#7 ->team->team_lock_key#8 
->team->team_lock_key#9 ->k-clock-AF_INET6 ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->sk_lock-AF_CAN ->slock-AF_CAN ->free_vmap_area_lock ->vmap_area_lock ->&local->sta_mtx ->purge_vmap_area_lock ->mrt_lock#2 ->&newf->file_lock ->&sb->s_type->i_lock_key#15 ->bpf_dispatcher_xdp.mutex ->&r->consumer_lock#3 ->&r->consumer_lock#4 ->sk_lock-AF_UNSPEC ->slock-AF_UNSPEC ->(work_completion)(&(&slave->notify_work)->work) ->_xmit_ETHER/2 ->(&hsr->prune_timer) ->(&hsr->announce_timer) ->&dentry->d_lock ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&bridge_netdev_addr_lock_key/2 ->_xmit_NETROM#2 ->&this->info_list_lock ->&app->lock ->(&app->join_timer) ->(&app->periodic_timer) ->&list->lock#11 ->(&app->join_timer)#2 ->&app->lock#2 ->&list->lock#12 ->&x->wait#27 ->&sb->s_type->i_lock_key#23 ->_xmit_PHONET_PIPE ->&dev_addr_list_lock_key#3/2 ->(work_completion)(&port->wq) ->&x->wait#10 ->&batadv_netdev_addr_lock_key ->&bond->mode_lock ->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&bond->arp_work)->work) ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->ad_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->(work_completion)(&(&bond->slave_arr_work)->work) ->&net->xfrm.xfrm_state_lock ->dev_pm_qos_sysfs_mtx.wait_lock ->rename_lock.seqcount ->&tn->idrinfo->lock#3 ->&r->consumer_lock#2 ->&wg->socket_update_lock ->&table->hash[i].lock ->k-clock-AF_INET ->&tn->idrinfo->lock#4 ->&tn->idrinfo->lock#5 ->wq_mayday_lock ->(&pmctx->ip6_mc_router_timer) ->(&pmctx->ip4_mc_router_timer) ->&tn->idrinfo->lock#6 ->&dev_addr_list_lock_key/2 ->(&br->hello_timer) ->(&br->topology_change_timer) ->(&br->tcn_timer) ->&r->consumer_lock#6 ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->acaddr_hash_lock ->&net->xfrm.xfrm_policy_lock ->&xs->mutex ->&pmc->lock ->&macvlan_netdev_addr_lock_key/2 ->(work_completion)(&port->bc_work) ->&tn->nametbl_lock ->&ht->lock ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&tipc_net(net)->bclock ->&tn->idrinfo->lock#7 ->hrtimer_bases.lock ->(&mp->timer) ->&pgdat->kswapd_wait ->ematch_mod_lock FD: 35 BD: 247 +.+.: lock ->kernfs_idr_lock ->cgroup_idr_lock ->pidmap_lock ->drm_minor_lock ->&file_private->table_lock ->&q->queue_lock ->sg_index_lock ->map_idr_lock ->prog_idr_lock ->btf_idr_lock ->&group->inotify_data.idr_lock ->link_idr_lock ->sctp_assocs_id_lock FD: 13 BD: 4416 +.+.: kernfs_idr_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 132 BD: 250 ++++: &root->kernfs_rwsem ->&root->kernfs_iattr_rwsem ->kernfs_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&____s->seqcount ->&zone->lock ->&cfs_rq->removed.lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&base->lock ->inode_hash_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&c->lock ->batched_entropy_u8.lock ->rcu_node_0 ->remove_cache_srcu ->&n->list_lock ->&sem->wait_lock ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 ->kernfs_rename_lock ->&xa->xa_lock#4 ->stock_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->&p->pi_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->pool_lock FD: 1 BD: 4 ++++: file_systems_lock FD: 129 BD: 255 ++++: &root->kernfs_iattr_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->iattr_mutex ->&sem->wait_lock ->tk_core.seq.seqcount ->pool_lock#2 ->rcu_node_0 ->pgd_lock ->stock_lock ->key ->pcpu_lock 
->percpu_counters_lock ->&rcu_state.expedited_wq FD: 5 BD: 51 +.+.: sb_lock ->unnamed_dev_ida.xa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 141 BD: 1 +.+.: &type->s_umount_key/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start FD: 1 BD: 41 +.+.: list_lrus_mutex FD: 1 BD: 52 ....: unnamed_dev_ida.xa_lock FD: 1 BD: 27 +.+.: &sbinfo->stat_lock FD: 56 BD: 4426 +.+.: &sb->s_type->i_lock_key ->&dentry->d_lock ->&xa->xa_lock#8 ->&dentry->d_lock/1 FD: 1 BD: 4419 +.+.: &s->s_inode_list_lock FD: 41 BD: 4488 +.+.: &dentry->d_lock ->&wq ->&dentry->d_lock/1 ->&wq#2 ->&lru->node[i].lock ->sysctl_lock ->&wq#3 ->&dentry->d_lock/2 ->&p->pi_lock FD: 2 BD: 29 ....: mnt_id_ida.xa_lock ->pool_lock#2 FD: 45 BD: 196 +.+.: mount_lock ->mount_lock.seqcount ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 43 BD: 196 +.+.: mount_lock.seqcount ->&new_ns->poll ->&dentry->d_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 139 BD: 1 +.+.: &type->s_umount_key#2/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 4426 +.+.: &sb->s_type->i_lock_key#2 ->&dentry->d_lock FD: 1 BD: 2 ..-.: ucounts_lock FD: 43 BD: 218 +.+.: init_fs.lock ->init_fs.seq.seqcount ->&dentry->d_lock FD: 1 BD: 211 +.+.: init_fs.seq.seqcount FD: 139 BD: 1 +.+.: &type->s_umount_key#3/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 56 BD: 4430 +.+.: &sb->s_type->i_lock_key#3 ->&dentry->d_lock ->&xa->xa_lock#8 FD: 1 BD: 145 +.+.: cpuhp_state-down FD: 211 BD: 145 +.+.: cpuhp_state-up ->smpboot_threads_lock ->sparse_irq_lock ->&swhash->hlist_mutex ->pmus_lock ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->wq_pool_mutex ->rcu_node_0 ->&rq->__lock ->jump_label_mutex ->fs_reclaim ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#24 ->subsys mutex#25 ->&k->k_lock ->subsys mutex#79 ->&base->lock ->swap_slots_cache_mutex FD: 1 BD: 96 ++++: proc_subdir_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#4/1 ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&sb->s_type->i_lock_key#4 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 43 BD: 2 +.+.: &sb->s_type->i_lock_key#4 ->&dentry->d_lock ->bit_wait_table + i FD: 32 BD: 152 ....: cgroup_file_kn_lock ->kernfs_notify_lock FD: 35 BD: 151 ..-.: css_set_lock ->cgroup_file_kn_lock ->&p->pi_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->kernfs_rename_lock FD: 9 BD: 248 +...: cgroup_idr_lock ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 147 BD: 146 +.+.: cpuset_mutex ->callback_lock ->jump_label_mutex ->&p->pi_lock ->&p->alloc_lock ->cpuset_attach_wq.lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->css_set_lock 
FD: 1 BD: 147 ....: callback_lock FD: 135 BD: 16 +.+.: blkcg_pol_mutex ->pcpu_alloc_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 3 BD: 5117 -.-.: batched_entropy_u8.lock ->crngs.lock FD: 1 BD: 4410 ....: &pgdat->memcg_lru.lock FD: 1 BD: 17 +.+.: devcgroup_mutex FD: 49 BD: 146 +.+.: freezer_mutex ->freezer_lock ->&rq->__lock ->rcu_node_0 ->freezer_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&rcu_state.expedited_wq FD: 43 BD: 111 +.+.: rcu_state.exp_mutex ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&obj_hash[i].lock ->&pool->lock ->&rnp->exp_wq[2] ->&rq->__lock ->&rnp->exp_wq[3] ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->rcu_state.exp_mutex.wait_lock ->pool_lock#2 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 38 BD: 178 +.+.: rcu_state.exp_wake_mutex ->rcu_node_0 ->&rnp->exp_lock ->&rnp->exp_wq[0] ->&rnp->exp_wq[1] ->&rnp->exp_wq[2] ->&rnp->exp_wq[3] ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_state.exp_wake_mutex.wait_lock ->&pool->lock FD: 1 BD: 179 +.+.: &rnp->exp_lock FD: 29 BD: 206 ....: &rnp->exp_wq[0] ->&p->pi_lock FD: 29 BD: 208 ....: &rnp->exp_wq[1] ->&p->pi_lock FD: 1 BD: 149 ....: init_sighand.siglock FD: 1 BD: 3 +.+.: init_files.file_lock FD: 13 BD: 257 ....: pidmap_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 174 BD: 145 ++++: cgroup_threadgroup_rwsem ->css_set_lock ->&p->pi_lock ->tk_core.seq.seqcount ->tasklist_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&sighand->siglock ->cgroup_threadgroup_rwsem.rss.gp_wait.lock ->rcu_node_0 ->inode_hash_lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#30 ->&root->kernfs_iattr_rwsem ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&c->lock ->cpuset_mutex ->&p->alloc_lock ->freezer_mutex ->&____s->seqcount#2 ->&____s->seqcount ->freezer_mutex.wait_lock ->&rcu_state.expedited_wq FD: 28 BD: 4878 -.-.: &p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 70 BD: 148 .+.+: tasklist_lock ->init_task.pi_lock ->init_sighand.siglock ->&p->pi_lock ->&sighand->siglock ->&pid->wait_pidfd ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&p->alloc_lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock FD: 1 BD: 4962 -.-.: &per_cpu_ptr(group->pcpu, cpu)->seq FD: 1 BD: 1 ....: (kthreadd_done).wait.lock FD: 43 BD: 155 ....: &sighand->siglock ->&sig->wait_chldexit ->input_pool.lock ->&(&sig->stats_lock)->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->hrtimer_bases.lock ->&p->pi_lock ->&obj_hash[i].lock ->&sighand->signalfd_wqh ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&tty->ctrl.lock ->&prev->lock ->quarantine_lock ->&rq->__lock ->stock_lock ->&n->list_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 51 BD: 216 +.+.: &p->alloc_lock ->&____s->seqcount#2 ->init_fs.lock ->&fs->lock ->&x->wait ->&memcg->mm_list.lock ->&x->wait#25 ->&newf->file_lock FD: 1 BD: 5075 .-.-: &____s->seqcount#2 FD: 127 BD: 4409 +.+.: fs_reclaim ->mmu_notifier_invalidate_range_start ->&mapping->i_mmap_rwsem ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq ->&zone->lock ->&pgdat->kcompactd_wait ->lock#4 ->lock#5 ->batched_entropy_u8.lock ->&pgdat->memcg_lru.lock 
->&lruvec->lru_lock ->&mapping->private_lock ->&sb->s_type->i_lock_key#3 ->&memcg->mm_list.lock ->&c->lock ->&vmpr->sr_lock ->&sb->s_type->i_lock_key#22 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&n->list_lock FD: 37 BD: 4419 +.+.: mmu_notifier_invalidate_range_start ->dma_fence_map ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pgd_lock ->pool_lock#2 ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 175 +.+.: kthread_create_lock FD: 29 BD: 248 ....: &x->wait ->&p->pi_lock FD: 35 BD: 150 +.+.: wq_pool_attach_mutex ->&p->pi_lock ->&x->wait#7 ->&pool->lock ->&pool->lock/1 ->&rq->__lock ->wq_pool_attach_mutex.wait_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 4671 ..-.: wq_mayday_lock ->&p->pi_lock FD: 1 BD: 149 ....: &xa->xa_lock FD: 33 BD: 1 +.-.: (&pool->mayday_timer) ->&pool->lock/1 ->&obj_hash[i].lock ->&base->lock ->&pool->lock FD: 55 BD: 1 +.+.: (wq_completion)rcu_gp ->(work_completion)(&rnp->exp_poll_wq) ->(work_completion)(&(&ssp->srcu_sup->work)->work) ->(work_completion)(&sdp->work) ->(work_completion)(&rew->rew_work) ->&rq->__lock FD: 32 BD: 2 +.+.: (work_completion)(&rnp->exp_poll_wq) ->&rnp->exp_poll_lock FD: 13 BD: 1 +.-.: (&wq_watchdog_timer) ->&obj_hash[i].lock ->&base->lock FD: 968 BD: 1 +.+.: (wq_completion)events_unbound ->(work_completion)(&(&kfence_timer)->work) ->(work_completion)(&entry->work) ->(next_reseed).work ->(work_completion)(&sub_info->work) ->(stats_flush_dwork).work ->deferred_probe_work ->(work_completion)(&barr->work) ->(work_completion)(&map->work) ->connector_reaper_work ->(reaper_work).work ->(work_completion)(&port->bc_work) ->(work_completion)(&pool->idle_cull_work) ->&rq->__lock FD: 285 BD: 2 +.+.: (work_completion)(&(&kfence_timer)->work) ->cpu_hotplug_lock ->allocation_wait.lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 FD: 29 BD: 3 -.-.: allocation_wait.lock ->&p->pi_lock FD: 1 BD: 5126 -.-.: kfence_freelist_lock FD: 1 BD: 4460 ..-.: &meta->lock FD: 5 BD: 2 ....: rcu_tasks.cbs_gbl_lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 3 ....: rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 3 BD: 147 ....: &ACCESS_PRIVATE(rtpcp, lock) ->&obj_hash[i].lock FD: 5 BD: 2 ....: rcu_tasks_trace.cbs_gbl_lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->&ACCESS_PRIVATE(rtpcp, lock) FD: 3 BD: 145 ....: rcu_tasks_trace__percpu.cbs_pcpu_lock ->&obj_hash[i].lock FD: 58 BD: 1 +.+.: rcu_tasks.tasks_gp_mutex ->rcu_tasks.cbs_gbl_lock ->&rq->__lock ->rcu_tasks__percpu.cbs_pcpu_lock ->&obj_hash[i].lock ->&base->lock ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->tasks_rcu_exit_srcu ->&x->wait#3 ->kernel/rcu/tasks.h:147 ->(&timer.timer) ->&x->wait#2 ->(console_sem).lock FD: 29 BD: 3 ....: &x->wait#2 ->&p->pi_lock FD: 29 BD: 209 ....: &rnp->exp_wq[2] ->&p->pi_lock FD: 32 BD: 6 ....: tasks_rcu_exit_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 2 ....: tasks_rcu_exit_srcu FD: 41 BD: 2 +.+.: (work_completion)(&(&ssp->srcu_sup->work)->work) ->&ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->tracepoint_srcu_srcu_usage.lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq 
->pool_lock#2 FD: 40 BD: 3 +.+.: &ssp->srcu_sup->srcu_gp_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->&ssp->srcu_sup->srcu_cb_mutex ->remove_cache_srcu_srcu_usage.lock ->&rq->__lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->rcu_node_0 ->&rcu_state.expedited_wq ->tracepoint_srcu_srcu_usage.lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 29 BD: 22 ....: &x->wait#3 ->&p->pi_lock FD: 287 BD: 1 +.+.: rcu_tasks_trace.tasks_gp_mutex ->rcu_tasks_trace.cbs_gbl_lock ->&rq->__lock ->rcu_tasks_trace__percpu.cbs_pcpu_lock ->cpu_hotplug_lock ->&x->wait#2 ->&obj_hash[i].lock ->&base->lock ->(&timer.timer) ->(console_sem).lock FD: 5 BD: 1 -.-.: (null) ->tk_core.seq.seqcount FD: 31 BD: 1 ..-.: &(&ssp->srcu_sup->work)->timer FD: 37 BD: 4 +.+.: &ssp->srcu_sup->srcu_cb_mutex ->tasks_rcu_exit_srcu_srcu_usage.lock ->remove_cache_srcu_srcu_usage.lock ->wakeup_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&rq->__lock ->tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&base->lock FD: 33 BD: 2 +.+.: (work_completion)(&sdp->work) ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 2 ....: kernel/rcu/tasks.h:147 FD: 33 BD: 1 ..-.: &(&kfence_timer)->timer FD: 29 BD: 210 +.-.: (&timer.timer) ->&p->pi_lock FD: 29 BD: 209 ....: &rnp->exp_wq[3] ->&p->pi_lock FD: 1 BD: 1 ....: &nmi_desc[0].lock FD: 130 BD: 146 +.+.: smpboot_threads_lock ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock FD: 29 BD: 4340 ..-.: &rcu_state.gp_wq ->&p->pi_lock FD: 28 BD: 254 -.-.: &stop_pi_lock ->&rq->__lock FD: 1 BD: 254 -.-.: &stopper->lock FD: 1 BD: 2 +.+.: (module_notify_list).rwsem FD: 1 BD: 1 +.+.: ddebug_lock FD: 1 BD: 1 .+.+: &pmus_srcu FD: 284 BD: 1 +.+.: watchdog_mutex ->cpu_hotplug_lock FD: 29 BD: 145 ....: &x->wait#4 ->&p->pi_lock FD: 1063 BD: 1 +.+.: (wq_completion)events ->(work_completion)(&sscs.work) ->pcpu_balance_work ->(work_completion)(&pwq->unbound_release_work) ->(shepherd).work ->(work_completion)(&rfkill_global_led_trigger_work) ->timer_update_work ->(work_completion)(&p->wq) ->(work_completion)(&(&group->avgs_work)->work) ->(work_completion)(&helper->damage_work) ->(work_completion)(&rfkill->sync_work) ->(work_completion)(&(&krcp->monitor_work)->work) ->(linkwatch_work).work ->(work_completion)(&w->work) ->(work_completion)(&vi->config_work) ->(debug_obj_work).work ->(work_completion)(&gadget->work) ->kernfs_notify_work ->async_lookup_work ->autoload_work ->(work_completion)(&barr->work) ->(work_completion)(&blkg->free_work) ->drain_vmap_work ->netstamp_work ->reg_work ->(work_completion)(&fw_work->work) ->(delayed_fput_work).work ->(work_completion)(&s->destroy_work) ->(work_completion)(&(&ovs_net->masks_rebalance)->work) ->(work_completion)(&aux->work) ->(work_completion)(&ht->run_work) ->(work_completion)(&w->w) ->(work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->(deferred_probe_timeout_work).work ->(work_completion)(&w->work)#2 ->(regulator_init_complete_work).work ->(work_completion)(&cgrp->bpf.release_work) ->deferred_process_work ->(work_completion)(&data->fib_event_work) ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->(work_completion)(&(&conn->info_timer)->work) ->(work_completion)(&rdev->wiphy_work) ->wireless_nlevent_work 
->fqdir_free_work ->(work_completion)(&nlk->work) ->xfrm_state_gc_work ->(work_completion)(&msk->work) ->((ipv6_flowlabel_exclusive).work).work ->(work_completion)(&(&psock->rwork)->work) ->(work_completion)(&net->xfrm.policy_hthresh.work) ->free_ipc_work ->(work_completion)(&smcibdev->port_event_work) ->(work_completion)(&crct10dif_rehash_work) ->&rq->__lock ->(work_completion)(&umem->work) ->(ima_keys_delayed_work).work ->(work_completion)(&work->work)#3 ->((tcp_md5_needed).work).work ->(work_completion)(&old_rcpu->kthread_stop_wq) ->(work_completion)(&pool->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&krcp->page_cache_work)->work) FD: 31 BD: 2 +.+.: (work_completion)(&sscs.work) ->&x->wait#5 ->&obj_hash[i].lock ->hrtimer_bases.lock ->&x->wait#4 FD: 1 BD: 147 -.-.: &x->wait#5 FD: 1 BD: 1 ....: rcu_callback FD: 2 BD: 219 +.+.: &newf->file_lock ->&newf->resize_wait FD: 1 BD: 1 ....: &p->vtime.seqcount FD: 40 BD: 144 +.+.: mem_hotplug_lock ->mem_hotplug_lock.rss.gp_wait.lock FD: 3 BD: 145 ..-.: mem_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 144 ....: mem_hotplug_lock.waiters.lock FD: 287 BD: 1 +.+.: cpu_add_remove_lock ->cpu_hotplug_lock ->cpu_hotplug_lock.waiters.lock ->cpu_hotplug_lock.rss.gp_wait.lock ->spec_ctrl_mutex ->cpuset_hotplug_work FD: 3 BD: 144 ..-.: cpu_hotplug_lock.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 2 ....: cpu_hotplug_lock.waiters.lock FD: 1 BD: 8 +.+.: cpuset_hotplug_work FD: 1 BD: 145 +.+.: pcp_batch_high_lock FD: 1 BD: 144 +.+.: relay_channels_mutex FD: 1 BD: 152 ....: rtc_lock FD: 165 BD: 150 +.+.: sparse_irq_lock ->tk_core.seq.seqcount ->rtc_lock ->&x->wait#6 ->&rq->__lock ->&p->pi_lock ->&irq_desc_lock_class ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock ->&zone->lock ->&n->list_lock FD: 29 BD: 151 ....: &x->wait#6 ->&p->pi_lock FD: 1 BD: 4962 ....: &rq->__lock/1 FD: 1 BD: 4963 -.-.: &cfs_rq->removed.lock FD: 1 BD: 151 ....: &x->wait#7 FD: 18 BD: 4962 -...: &rt_b->rt_runtime_lock ->&rt_rq->rt_runtime_lock ->tk_core.seq.seqcount ->hrtimer_bases.lock FD: 1 BD: 4963 -...: &rt_rq->rt_runtime_lock FD: 31 BD: 144 +.+.: stop_cpus_mutex ->&stopper->lock ->&stop_pi_lock ->&rq->__lock ->&x->wait#8 FD: 1 BD: 146 ....: &x->wait#8 FD: 135 BD: 1 +.+.: sched_domains_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->pcpu_alloc_mutex ->&zone->lock ->&____s->seqcount ->&c->lock ->pcpu_lock FD: 1 BD: 4962 ....: &cp->lock FD: 1 BD: 1 +.+.: (memory_chain).rwsem FD: 141 BD: 1 +.+.: &type->s_umount_key#5/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->percpu_counters_lock ->crngs.lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&dentry->d_lock FD: 56 BD: 4426 +.+.: &sb->s_type->i_lock_key#5 ->&dentry->d_lock ->&xa->xa_lock#8 FD: 29 BD: 1 ....: (setup_done).wait.lock ->&p->pi_lock FD: 143 BD: 26 ++++: namespace_sem ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->rename_lock ->&obj_hash[i].lock ->&new_ns->ns_lock ->&rq->__lock ->stock_lock ->&____s->seqcount#2 ->&n->list_lock ->namespace_sem.wait_lock FD: 1 BD: 209 +.+.: &____s->seqcount#3 FD: 129 BD: 1 +.+.: &type->s_umount_key#6 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&zone->lock ->&____s->seqcount ->&c->lock 
->&lru->node[i].lock ->&sbinfo->stat_lock ->&obj_hash[i].lock FD: 29 BD: 4490 +.+.: &lru->node[i].lock FD: 135 BD: 8 ++++: &sb->s_type->i_mutex_key ->namespace_sem ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#2 ->&wb->list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&rq->__lock ->&dentry->d_lock/1 ->&cfs_rq->removed.lock ->rcu_node_0 FD: 43 BD: 87 +.+.: rename_lock ->rename_lock.seqcount ->&dentry->d_lock FD: 42 BD: 217 +.+.: rename_lock.seqcount ->&dentry->d_lock ->&dentry->d_lock/2 FD: 1 BD: 197 ....: &new_ns->poll FD: 2 BD: 4491 +.+.: &____s->seqcount#4 ->&____s->seqcount#4/1 FD: 43 BD: 217 +.+.: &fs->lock ->&____s->seqcount#3 ->&dentry->d_lock FD: 1 BD: 177 +.+.: req_lock FD: 150 BD: 1 +.+.: of_mutex ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem FD: 1 BD: 214 ....: &x->wait#9 FD: 1 BD: 241 +.+.: &k->list_lock FD: 28 BD: 220 ++++: bus_type_sem ->&rq->__lock FD: 33 BD: 4477 -...: &dev->power.lock ->&dev->power.lock/1 ->&dev->power.wait_queue ->hrtimer_bases.lock FD: 40 BD: 216 +.+.: dpm_list_mtx ->(console_sem).lock ->&rq->__lock FD: 138 BD: 227 +.+.: uevent_sock_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&zone->lock ->&rq->__lock ->&cfs_rq->removed.lock ->rlock-AF_NETLINK ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->&n->list_lock ->mmu_notifier_invalidate_range_start ->uevent_sock_mutex.wait_lock ->quarantine_lock ->&____s->seqcount#2 ->&meta->lock ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&base->lock FD: 1 BD: 206 ....: running_helpers_waitq.lock FD: 1 BD: 226 +.+.: sysfs_symlink_target_lock FD: 2 BD: 291 +.+.: &k->k_lock ->klist_remove_lock FD: 1 BD: 1 ....: &dev->mutex FD: 1 BD: 1 +.+.: subsys mutex FD: 6 BD: 1 +.+.: memory_blocks.xa_lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 +.+.: subsys mutex#2 FD: 130 BD: 12 +.+.: register_lock ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 1 BD: 2 +.+.: (pm_chain_head).rwsem FD: 1 BD: 1 +.+.: cpufreq_governor_mutex FD: 41 BD: 2 +.+.: (work_completion)(&rew->rew_work) ->rcu_node_0 ->rcu_state.exp_wake_mutex ->&rcu_state.expedited_wq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->(&timer.timer) ->&pool->lock ->pool_lock#2 ->pool_lock ->&cfs_rq->removed.lock ->rcu_state.exp_wake_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 1 +.+.: dyn_event_ops_mutex FD: 1 BD: 2 ++++: binfmt_lock FD: 1 BD: 104 +.+.: pin_fs_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#7/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#6 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 3 +.+.: &sb->s_type->i_lock_key#6 ->&dentry->d_lock FD: 130 BD: 1 +.+.: &sb->s_type->i_mutex_key#2 ->&sb->s_type->i_lock_key#6 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 29 BD: 4491 ....: &wq ->&p->pi_lock FD: 1 BD: 37 +.+.: chrdevs_lock FD: 936 BD: 1 ++++: cb_lock ->genl_mutex ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->rtnl_mutex ->&obj_hash[i].lock 
->&c->lock ->&____s->seqcount ->&n->list_lock ->&rdev->wiphy.mtx ->quarantine_lock ->remove_cache_srcu ->nlk_cb_mutex-GENERIC ->genl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->rtnl_mutex.wait_lock ->&lock->wait_lock ->&____s->seqcount#2 ->tk_core.seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&dir->lock#2 ->rcu_node_0 ->(console_sem).lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&devlink->lock_key ->&devlink->lock_key#3 ->ovs_mutex ->&sdata->lock ->cpu_hotplug_lock ->stock_lock ->pcpu_alloc_mutex ->crngs.lock ->nl_table_lock ->nl_table_wait.lock ->&fw_cache.lock ->async_lock ->init_task.alloc_lock ->&dentry->d_lock ->&type->i_mutex_dir_key#3 ->&sb->s_type->i_lock_key#22 ->umhelper_sem ->fw_lock ->&devlink->lock_key#2 FD: 899 BD: 5 +.+.: genl_mutex ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&n->list_lock ->rtnl_mutex ->genl_mutex.wait_lock ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&zone->lock ->hwsim_radio_lock ->&x->wait#9 ->batched_entropy_u32.lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#53 ->device_links_lock ->&k->k_lock ->deferred_probe_mutex ->cpu_hotplug_lock ->wq_pool_mutex ->crngs.lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(inetaddr_chain).rwsem ->inet6addr_chain.lock ->&____s->seqcount#2 ->&sem->wait_lock ->remove_cache_srcu ->rcu_node_0 ->&rcu_state.expedited_wq ->(console_sem).lock ->console_owner_lock ->console_owner ->&base->lock ->(&timer.timer) ->&cfs_rq->removed.lock ->tcp_metrics_lock ->quarantine_lock ->nbd_index_mutex ->&nbd->config_lock ->&pn->l2tp_tunnel_idr_lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->l2tp_ip_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->k-clock-AF_INET ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&sdata->sec_mtx ->__ip_vs_mutex ->key#25 ->team->team_lock_key#3 ->&ht->lock ->&pernet->lock ->&pnettable->lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->krc.lock ->&dir->lock#2 ->netdev_unregistering_wq.lock ->&bat_priv->tp_list_lock ->kthread_create_lock ->&x->wait ->&fn->fou_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->calipso_doi_list_lock ->&pool->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&table->hash[i].lock ->vdpa_dev_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#8/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 97 +.+.: &sb->s_type->i_lock_key#7 ->&dentry->d_lock FD: 135 BD: 95 +.+.: &sb->s_type->i_mutex_key#3 ->&sb->s_type->i_lock_key#7 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->(console_sem).lock ->remove_cache_srcu ->pin_fs_lock ->mount_lock ->&fsnotify_mark_srcu ->&xa->xa_lock#8 ->&____s->seqcount#2 ->rcu_node_0 ->&xa->xa_lock#4 ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4 +.+.: subsys mutex#3 
FD: 4 BD: 7 ....: async_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 387 BD: 2 +.+.: (work_completion)(&entry->work) ->tk_core.seq.seqcount ->&dev->power.lock ->&k->list_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->async_lock ->async_done.lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->sb_writers#2 ->quarantine_lock ->&pool->lock/1 ->cpu_hotplug_lock ->wq_pool_mutex ->&n->list_lock ->pcpu_alloc_mutex ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->queue_lock ->&rq->__lock ->major_names_lock ->floppy_lock ->rtc_lock ->&wq->mutex ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock ->resource_lock ->&base->lock ->(&timer.timer) ->command_done.lock ->&shost->scan_mutex ->(console_sem).lock ->console_owner_lock ->console_owner ->async_scan_lock ->&q->debugfs_mutex ->klist_remove_lock ->kernfs_idr_lock ->&x->wait#10 ->(&motor_off_timer[drive]) ->&xa->xa_lock#10 ->&q->unused_hctx_lock ->(&sq->pending_timer) ->(work_completion)(&td->dispatch_work) ->&q->blkcg_mutex ->pcpu_lock ->&xa->xa_lock#8 ->&fsnotify_mark_srcu FD: 1 BD: 22 .+.+: device_links_srcu FD: 3 BD: 21 +.+.: fwnode_link_lock ->&k->k_lock FD: 1 BD: 3 +.+.: regulator_list_mutex FD: 31 BD: 111 +.+.: device_links_lock ->&k->list_lock ->&k->k_lock ->&rq->__lock FD: 1 BD: 4 ....: &dev->devres_lock FD: 1 BD: 4 +.+.: regulator_nesting_mutex FD: 2 BD: 1 +.+.: regulator_ww_class_mutex ->regulator_nesting_mutex FD: 154 BD: 189 +.+.: gdp_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->kobj_ns_type_lock ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock ->sysfs_symlink_target_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&sem->wait_lock ->&p->pi_lock ->kernfs_idr_lock ->&n->list_lock ->gdp_mutex.wait_lock FD: 3 BD: 3 +.+.: subsys mutex#4 ->&k->k_lock FD: 28 BD: 111 +.+.: deferred_probe_mutex ->&rq->__lock FD: 1 BD: 20 ....: probe_waitqueue.lock FD: 1 BD: 3 ....: async_done.lock FD: 139 BD: 1 +.+.: &type->s_umount_key#9/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 43 BD: 4428 +.+.: &sb->s_type->i_lock_key#8 ->&dentry->d_lock ->bit_wait_table + i FD: 137 BD: 84 +.+.: pack_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->&rq->__lock ->vmap_purge_lock ->cpa_lock ->text_mutex ->text_mutex.wait_lock ->pack_mutex.wait_lock ->&pool->lock FD: 128 BD: 83 +.+.: &fp->aux->used_maps_mutex ->&rq->__lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 +.+.: proto_list_mutex FD: 1 BD: 1 +.+.: targets_mutex FD: 30 BD: 4054 ...-: nl_table_lock ->pool_lock#2 ->nl_table_wait.lock ->&obj_hash[i].lock FD: 29 BD: 4055 ..-.: nl_table_wait.lock ->&p->pi_lock FD: 1 BD: 1 +.+.: net_family_lock FD: 8 BD: 5 ....: net_generic_ids.xa_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 
FD: 7 BD: 3837 ..-.: &dir->lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 36 BD: 5 +.+.: k-sk_lock-AF_NETLINK ->k-slock-AF_NETLINK FD: 1 BD: 6 +...: k-slock-AF_NETLINK FD: 32 BD: 3845 ..-.: rhashtable_bucket ->rhashtable_bucket/1 FD: 44 BD: 147 ....: freezer_lock ->&sighand->siglock FD: 1 BD: 1 ....: audit_backlog_wait.lock FD: 29 BD: 10 ....: kauditd_wait.lock ->&p->pi_lock FD: 1 BD: 10 ....: &list->lock FD: 3 BD: 2 +.+.: lock#2 ->&zone->lock FD: 132 BD: 1 +.+.: khugepaged_mutex ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->lock#2 ->pcp_batch_high_lock FD: 3 BD: 14 +.+.: subsys mutex#5 ->&k->k_lock FD: 4 BD: 1 +.+.: subsys mutex#6 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 +.+.: regmap_debugfs_early_lock FD: 1 BD: 1 +.+.: (acpi_reconfig_chain).rwsem FD: 1 BD: 1 +.+.: __i2c_board_lock FD: 131 BD: 1 +.+.: core_lock ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 FD: 2 BD: 1 +.+.: thermal_governor_lock ->thermal_list_lock FD: 1 BD: 2 +.+.: thermal_list_lock FD: 151 BD: 1 +.+.: cpuidle_lock ->&obj_hash[i].lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount FD: 132 BD: 3 +.+.: k-sk_lock-AF_QIPCRTR ->k-slock-AF_QIPCRTR ->fs_reclaim ->qrtr_ports.xa_lock ->pool_lock#2 ->qrtr_node_lock ->&obj_hash[i].lock ->rlock-AF_QIPCRTR FD: 1 BD: 4 +...: k-slock-AF_QIPCRTR FD: 9 BD: 7 +.+.: qrtr_ports.xa_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 7 +.+.: qrtr_node_lock FD: 29 BD: 4390 -.-.: &rcu_state.expedited_wq ->&p->pi_lock FD: 1 BD: 1 ....: printk_ratelimit_state.lock FD: 130 BD: 160 ++++: (crypto_chain).rwsem ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock ->&____s->seqcount#2 FD: 285 BD: 1 +.+.: iova_cache_mutex ->cpu_hotplug_lock ->slab_mutex FD: 3 BD: 1 +.+.: subsys mutex#7 ->&k->k_lock FD: 1 BD: 150 ....: pci_config_lock FD: 1 BD: 1 +.+.: subsys mutex#8 FD: 129 BD: 104 +.+.: dev_pm_qos_mtx ->fs_reclaim ->pool_lock#2 ->&dev->power.lock ->pm_qos_lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&rq->__lock FD: 1 BD: 105 ....: pm_qos_lock FD: 153 BD: 103 +.+.: dev_pm_qos_sysfs_mtx ->dev_pm_qos_mtx ->&root->kernfs_rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock ->dev_pm_qos_sysfs_mtx.wait_lock FD: 128 BD: 1 +.+.: mtrr_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 ..-.: uidhash_lock FD: 1 BD: 1 +.+.: detected_devices_mutex FD: 135 BD: 1 +.+.: (work_completion)(&eval_map_work) ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock FD: 1 BD: 1 ....: oom_reaper_wait.lock FD: 1 BD: 1 +.+.: subsys mutex#9 FD: 29 BD: 4410 ....: &pgdat->kcompactd_wait ->&p->pi_lock FD: 135 BD: 2 +.+.: pcpu_balance_work ->pcpu_alloc_mutex ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 170 BD: 1 +.+.: memory_tier_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&zone->lock ->&____s->seqcount ->&k->k_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#10 FD: 1 BD: 2 +.+.: subsys mutex#10 FD: 1 BD: 1 +.+.: ksm_thread_mutex FD: 1 BD: 1 ....: ksm_thread_wait.lock FD: 1 BD: 2 +.+.: damon_ops_lock FD: 132 BD: 159 ++++: crypto_alg_sem ->(crypto_chain).rwsem 
->&rq->__lock ->crypto_alg_sem.wait_lock ->&pool->lock FD: 42 BD: 4 +.+.: lock#3 ->&obj_hash[i].lock ->&rq->__lock ->(work_completion)(work) ->&x->wait#10 ->&cfs_rq->removed.lock ->pool_lock FD: 1 BD: 147 +.+.: khugepaged_mm_lock FD: 29 BD: 147 ....: khugepaged_wait.lock ->&p->pi_lock FD: 1 BD: 4449 ..-.: quarantine_lock FD: 39 BD: 4145 .+.+: remove_cache_srcu ->quarantine_lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->&____s->seqcount ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&base->lock ->&meta->lock ->kfence_freelist_lock ->pool_lock FD: 148 BD: 2 +.+.: (work_completion)(&pwq->unbound_release_work) ->&wq->mutex ->wq_pool_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&pool->lock ->&rnp->exp_wq[0] ->&rq->__lock ->&rnp->exp_wq[1] ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_wq[3] ->&base->lock ->&cfs_rq->removed.lock ->&rnp->exp_wq[2] ->&rcu_state.expedited_wq ->pool_lock FD: 158 BD: 5 +.+.: bio_slab_lock ->fs_reclaim ->pool_lock#2 ->slab_mutex ->bio_slabs.xa_lock FD: 6 BD: 6 +.+.: bio_slabs.xa_lock ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 129 BD: 3 +.+.: major_names_lock ->fs_reclaim ->pool_lock#2 ->major_names_spinlock FD: 1 BD: 4 +.+.: major_names_spinlock FD: 3 BD: 14 +.+.: subsys mutex#11 ->&k->k_lock FD: 1 BD: 1 ....: *(&acpi_gbl_hardware_lock) FD: 40 BD: 1 ....: *(&acpi_gbl_gpe_lock) ->(console_sem).lock FD: 5 BD: 162 ....: mask_lock ->tmp_mask_lock FD: 4 BD: 163 -.-.: tmp_mask_lock ->vector_lock ->ioapic_lock FD: 1 BD: 1 -...: shrink_qlist.lock FD: 32 BD: 5 ....: remove_cache_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 35 BD: 144 +.+.: flush_lock ->&obj_hash[i].lock ->(work_completion)(&sfw->work) ->&x->wait#10 ->&rq->__lock FD: 10 BD: 146 +.+.: (work_completion)(&sfw->work) ->&c->lock ->&n->list_lock ->&obj_hash[i].lock FD: 32 BD: 145 +.+.: (wq_completion)slub_flushwq ->(work_completion)(&sfw->work) ->(work_completion)(&barr->work) FD: 29 BD: 4522 ....: &x->wait#10 ->&p->pi_lock FD: 30 BD: 152 +.+.: (work_completion)(&barr->work) ->&x->wait#10 ->&rq->__lock FD: 1 BD: 1 +.+.: system_transition_mutex FD: 1 BD: 1 +.+.: (power_off_prep_handler_list).rwsem FD: 1 BD: 1 ....: power_off_handler_list.lock FD: 1 BD: 1 +.+.: (restart_prep_handler_list).rwsem FD: 1 BD: 1 +.+.: (reboot_notifier_list).rwsem FD: 199 BD: 1 +.+.: acpi_scan_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->acpi_device_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->subsys mutex#12 ->&____s->seqcount ->&zone->lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&c->lock ->*(&acpi_gbl_reference_count_lock) ->&n->list_lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->pci_config_lock ->quarantine_lock ->(console_sem).lock ->pci_bus_sem ->pci_mmcfg_lock ->resource_lock ->&device->physical_node_lock ->fwnode_link_lock ->devtree_lock ->gdp_mutex ->subsys mutex#13 ->pci_acpi_companion_lookup_sem ->pci_slot_mutex ->tk_core.seq.seqcount ->resource_alignment_lock ->device_links_srcu ->subsys mutex#14 ->acpi_pm_notifier_install_lock ->pci_rescan_remove_lock ->subsys mutex#3 ->acpi_link_lock ->acpi_dep_list_lock ->wakeup_ida.xa_lock ->subsys mutex#15 ->events_lock ->power_resource_list_lock FD: 130 BD: 2 +.+.: 
acpi_device_lock ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 3 ....: &xa->xa_lock#2 FD: 1 BD: 2 +.+.: subsys mutex#12 FD: 1 BD: 2 ++++: pci_bus_sem FD: 1 BD: 2 +.+.: pci_mmcfg_lock FD: 151 BD: 12 +.+.: &device->physical_node_lock ->sysfs_symlink_target_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount FD: 3 BD: 2 +.+.: subsys mutex#13 ->&k->k_lock FD: 1 BD: 2 .+.+: pci_acpi_companion_lookup_sem FD: 1 BD: 2 +.+.: pci_slot_mutex FD: 1 BD: 2 +.+.: resource_alignment_lock FD: 1 BD: 4478 ....: &dev->power.lock/1 FD: 1 BD: 2 +.+.: subsys mutex#14 FD: 175 BD: 2 +.+.: acpi_pm_notifier_install_lock ->semaphore->lock ->fs_reclaim ->pool_lock#2 ->*(&acpi_gbl_reference_count_lock) ->acpi_pm_notifier_lock ->&c->lock ->&____s->seqcount FD: 173 BD: 3 +.+.: acpi_pm_notifier_lock ->fs_reclaim ->pool_lock#2 ->wakeup_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#15 ->events_lock FD: 1 BD: 7 ....: wakeup_ida.xa_lock FD: 3 BD: 7 +.+.: subsys mutex#15 ->&k->k_lock FD: 1 BD: 7 ....: events_lock FD: 130 BD: 1 +.+.: &pgdat->kswapd_lock ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&rq->__lock ->&x->wait ->&obj_hash[i].lock ->&cfs_rq->removed.lock FD: 37 BD: 2 +.+.: pci_rescan_remove_lock FD: 33 BD: 1 ..-.: drivers/char/random.c:251 FD: 14 BD: 2 +.+.: (next_reseed).work ->&obj_hash[i].lock ->&base->lock ->input_pool.lock ->base_crng.lock FD: 131 BD: 2 +.+.: acpi_link_lock ->fs_reclaim ->pool_lock#2 ->semaphore->lock ->&obj_hash[i].lock ->*(&acpi_gbl_reference_count_lock) ->pci_config_lock ->&____s->seqcount ->(console_sem).lock ->&zone->lock ->&c->lock FD: 1 BD: 2 +.+.: acpi_dep_list_lock FD: 1 BD: 2 +.+.: power_resource_list_lock FD: 176 BD: 7 ++++: &(&priv->bus_notifier)->rwsem ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->i2c_dev_list_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#66 FD: 140 BD: 1 +.+.: &type->s_umount_key#10/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#9 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#9 ->&dentry->d_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#11/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#10 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#10 ->&dentry->d_lock FD: 212 BD: 146 ++++: &mm->mmap_lock ->reservation_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&mm->page_table_lock ->ptlock_ptr(page) ->&c->lock ->&anon_vma->rwsem ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->ptlock_ptr(page)#2 ->lock#4 ->lock#5 ->mmu_notifier_invalidate_range_start ->&vma->vm_lock->lock ->&obj_hash[i].lock ->&lruvec->lru_lock 
->&rq->__lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->&mapping->i_mmap_rwsem ->resource_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&mm->mmap_lock/1 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&meta->lock ->&sem->wait_lock ->&p->pi_lock ->remove_cache_srcu ->&folio_wait_table[i] ->&rcu_state.expedited_wq ->khugepaged_mm_lock ->khugepaged_wait.lock ->&xa->xa_lock#8 ->&info->lock ->mount_lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->&kcov->lock ->stock_lock ->&____s->seqcount#2 ->sb_pagefaults ->&mapping->private_lock ->&sb->s_type->i_lock_key#22 ->&base->lock ->&xa->xa_lock#4 ->&s->s_inode_list_lock ->batched_entropy_u32.lock ->vmap_area_lock ->&sb->s_type->i_mutex_key#21 ->&hugetlbfs_i_mmap_rwsem_key ->&vma_lock->rw_sema ->&dd->lock ->&hugetlb_fault_mutex_table[i] ->mapping.invalidate_lock ->&map->freeze_mutex ->lock#10 ->(console_sem).lock ->console_owner_lock ->console_owner ->&pgdat->kswapd_wait FD: 136 BD: 163 +.+.: reservation_ww_class_acquire ->reservation_ww_class_mutex FD: 135 BD: 164 +.+.: reservation_ww_class_mutex ->fs_reclaim ->&shmem->vmap_lock FD: 69 BD: 4410 ++++: &mapping->i_mmap_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&anon_vma->rwsem ->&____s->seqcount ->quarantine_lock ->&rq->__lock ->&sem->wait_lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->rcu_node_0 ->&p->pi_lock ->pool_lock ->&base->lock ->&cfs_rq->removed.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&meta->lock ->kfence_freelist_lock ->ptlock_ptr(page)#2 ->lock#4 ->lock#5 ->&rcu_state.expedited_wq ->stock_lock FD: 1 BD: 4420 +.+.: dma_fence_map FD: 29 BD: 3 +.+.: delayed_uprobe_lock ->&rq->__lock ->delayed_uprobe_lock.wait_lock FD: 1 BD: 4285 ....: key FD: 28 BD: 4 +.+.: attribute_container_mutex ->&rq->__lock FD: 143 BD: 18 ++++: triggers_list_lock ->&led_cdev->trigger_lock FD: 143 BD: 18 ++++: leds_list_lock ->&led_cdev->trigger_lock FD: 182 BD: 2 ++++: (usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#60 ->mon_lock ->&zone->lock ->&cfs_rq->removed.lock FD: 1 BD: 1 +.+.: rc_map_lock FD: 1 BD: 1 +.+.: subsys mutex#16 FD: 1 BD: 2 +.+.: &entry->access FD: 130 BD: 2 +.+.: info_mutex ->proc_subdir_lock ->fs_reclaim ->pool_lock#2 ->proc_inum_ida.xa_lock ->&c->lock ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock FD: 1 BD: 190 +.+.: kobj_ns_type_lock FD: 12 BD: 81 +.+.: &xa->xa_lock#3 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 FD: 31 BD: 1 ..-.: mm/vmstat.c:2018 FD: 284 BD: 2 +.+.: (shepherd).work ->cpu_hotplug_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 38 BD: 5 +.+.: (wq_completion)mm_percpu_wq ->(work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->(work_completion)(work) ->(work_completion)(&barr->work) FD: 29 
BD: 6 +.+.: (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) ->&obj_hash[i].lock ->&base->lock ->&pcp->lock ->&rq->__lock FD: 30 BD: 75 +.+.: subsys mutex#17 ->&k->k_lock ->&rq->__lock ->&cfs_rq->removed.lock FD: 7 BD: 3918 ..-.: &dir->lock#2 ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 36 BD: 78 +.+.: dev_hotplug_mutex ->&dev->power.lock ->&rq->__lock ->&k->k_lock FD: 13 BD: 80 ++++: dev_base_lock ->&xa->xa_lock#3 FD: 1 BD: 72 ++++: qdisc_mod_lock FD: 24 BD: 1 ++++: bt_proto_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->chan_list_lock ->l2cap_sk_list.lock ->hci_sk_list.lock ->&c->lock ->&n->list_lock ->sco_sk_list.lock ->hidp_sk_list.lock ->rfcomm_sk_list.lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 145 BD: 22 +.+.: hci_cb_list_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->chan_list_lock ->&conn->ident_lock ->&base->lock ->&list->lock#9 ->&conn->chan_lock ->&rq->__lock ->hci_cb_list_lock.wait_lock ->&c->lock ->&____s->seqcount ->&list->lock#10 ->(work_completion)(&(&conn->id_addr_timer)->work) ->(work_completion)(&(&conn->info_timer)->work) FD: 286 BD: 4 +.+.: mgmt_chan_list_lock ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->hci_dev_list_lock ->(console_sem).lock ->&rq->__lock ->fs_reclaim ->&c->lock ->rlock-AF_BLUETOOTH ->&hdev->lock ->mgmt_chan_list_lock.wait_lock ->&n->list_lock FD: 1 BD: 3786 ....: &list->lock#2 FD: 128 BD: 74 +.+.: rate_ctrl_mutex ->fs_reclaim ->pool_lock#2 FD: 2 BD: 6 +.+.: netlbl_domhsh_lock ->pool_lock#2 FD: 1 BD: 1 +.+.: netlbl_unlhsh_lock FD: 186 BD: 1 +.+.: misc_mtx ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#18 ->misc_minors_ida.xa_lock ->&cfs_rq->removed.lock ->&base->lock ->&dir->lock ->rfkill_global_mutex ->&____s->seqcount#2 ->remove_cache_srcu ->&n->list_lock FD: 29 BD: 177 ....: &x->wait#11 ->&p->pi_lock FD: 155 BD: 1 .+.+: sb_writers ->mount_lock ->&type->i_mutex_dir_key/1 ->&sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&wb->list_lock ->&type->i_mutex_dir_key#2 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&dentry->d_lock ->tomoyo_ss FD: 146 BD: 2 +.+.: &type->i_mutex_dir_key/1 ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#5 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&sb->s_type->i_mutex_key#4 ->quarantine_lock ->tomoyo_ss ->&u->bindlock ->&n->list_lock ->&fsnotify_mark_srcu ->&sem->wait_lock ->&rq->__lock ->&cfs_rq->removed.lock ->remove_cache_srcu ->krc.lock ->&xa->xa_lock#8 ->&sb->s_type->i_mutex_key#4/4 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 FD: 107 BD: 3 +.+.: &sb->s_type->i_mutex_key#4 ->tk_core.seq.seqcount ->&rq->__lock ->tomoyo_ss 
->&xattrs->lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_mutex_key#4/4 ->&sb->s_type->i_lock_key#5 ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&xa->xa_lock#8 ->lock#4 ->&info->lock ->key#9 ->rcu_node_0 ->&sem->wait_lock ->&rcu_state.expedited_wq ->&wb->list_lock ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock FD: 3 BD: 2 +.+.: subsys mutex#18 ->&k->k_lock FD: 195 BD: 6 +.+.: input_mutex ->&rq->__lock ->input_devices_poll_wait.lock ->fs_reclaim ->pool_lock#2 ->&dev->mutex#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->input_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->chrdevs_lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->&cfs_rq->removed.lock ->&led_cdev->led_access ->&mousedev->mutex/1 FD: 177 BD: 2 +.+.: (work_completion)(&rfkill_global_led_trigger_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock FD: 176 BD: 10 +.+.: rfkill_global_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&rfkill->lock ->uevent_sock_mutex ->&n->list_lock ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock ->&k->k_lock ->subsys mutex#41 ->triggers_list_lock ->leds_list_lock ->&pool->lock ->rfkill_global_mutex.wait_lock ->&cfs_rq->removed.lock ->&data->mtx ->&____s->seqcount#2 ->uevent_sock_mutex.wait_lock ->&p->pi_lock ->&sem->wait_lock FD: 1 BD: 7 ....: input_devices_poll_wait.lock FD: 308 BD: 3 ++++: (netlink_chain).rwsem ->pool_lock#2 ->&obj_hash[i].lock ->&rq->__lock ->reg_indoor_lock ->hwsim_radio_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&q->instances_lock ->&log->instances_lock ->&nft_net->commit_mutex ->rcu_node_0 ->&n->list_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rcu_state.expedited_wq ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 14 BD: 1 ++++: proto_tab_lock ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&c->lock ->raw_sk_list.lock ->&n->list_lock FD: 3 BD: 1 ....: random_ready_notifier.lock ->crngs.lock FD: 1 BD: 2 ....: misc_minors_ida.xa_lock FD: 41 BD: 1 ....: vga_lock#2 ->pci_config_lock ->(console_sem).lock FD: 178 BD: 1 +.+.: disable_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#3 FD: 140 BD: 1 +.+.: &type->s_umount_key#12/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#11 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#11 ->&dentry->d_lock FD: 285 BD: 2 +.+.: timer_update_work ->timer_keys_mutex FD: 284 BD: 3 +.+.: timer_keys_mutex ->cpu_hotplug_lock FD: 315 BD: 1 +.+.: (work_completion)(&tracerfs_init_work) ->pin_fs_lock ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&c->lock ->sb_lock ->&type->s_umount_key#13/1 ->&type->s_umount_key#14 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->&dentry->d_lock ->mount_lock ->&obj_hash[i].lock ->&fsnotify_mark_srcu 
->&sb->s_type->i_mutex_key#5 ->event_mutex ->(module_notify_list).rwsem ->trace_types_lock FD: 142 BD: 2 +.+.: &type->s_umount_key#13/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&type->s_umount_key#14 ->&sb->s_type->i_lock_key#12 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 133 BD: 3 +.+.: &type->s_umount_key#14 ->sb_lock ->list_lrus_mutex ->&xa->xa_lock#4 ->&obj_hash[i].lock ->pool_lock#2 ->shrinker_rwsem ->&rsp->gp_wait ->pcpu_lock ->fs_reclaim ->&dentry->d_lock ->&lru->node[i].lock FD: 42 BD: 8 +.+.: &sb->s_type->i_lock_key#12 ->&dentry->d_lock FD: 12 BD: 4447 ....: &xa->xa_lock#4 ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock FD: 130 BD: 6 +.+.: &sb->s_type->i_mutex_key#5 ->&sb->s_type->i_lock_key#12 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 4 BD: 18 ..-.: &rsp->gp_wait ->&obj_hash[i].lock ->pool_lock#2 FD: 134 BD: 4417 .+.+: &fsnotify_mark_srcu ->&conn->lock ->fs_reclaim ->pool_lock#2 ->&group->notification_lock ->&group->notification_waitq ->&obj_hash[i].lock ->&rq->__lock ->&c->lock ->rcu_node_0 ->remove_cache_srcu ->&____s->seqcount#2 ->&____s->seqcount ->&cfs_rq->removed.lock ->&n->list_lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#15/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#13 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#13 ->&dentry->d_lock FD: 302 BD: 2 +.+.: event_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#5 ->trace_event_sem ->trace_event_sem.wait_lock ->&p->pi_lock ->trace_types_lock ->sched_register_mutex ->tracepoints_mutex ->&rq->__lock FD: 140 BD: 1 +.+.: &type->s_umount_key#16/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#14 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#14 ->&dentry->d_lock FD: 1 BD: 5 ....: trace_event_sem.wait_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#17/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->&____s->seqcount ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#15 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 74 +.+.: &sb->s_type->i_lock_key#15 ->&dentry->d_lock FD: 129 BD: 1 +.+.: kclist_lock ->resource_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 139 BD: 1 +.+.: &type->s_umount_key#18/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#16 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 56 BD: 4426 +.+.: &sb->s_type->i_lock_key#16 ->&dentry->d_lock ->&xa->xa_lock#8 FD: 218 BD: 34 .+.+: tomoyo_ss ->mmu_notifier_invalidate_range_start ->pool_lock#2 
->tomoyo_policy_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dentry->d_lock ->tomoyo_log_lock ->tomoyo_log_wait.lock ->&rq->__lock ->file_systems_lock ->fs_reclaim ->quarantine_lock ->&mm->mmap_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&n->list_lock ->remove_cache_srcu ->rcu_node_0 ->&cfs_rq->removed.lock ->&base->lock ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&____s->seqcount#2 ->stock_lock ->mount_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#19/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#17 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 4 +.+.: &sb->s_type->i_lock_key#17 ->&dentry->d_lock FD: 132 BD: 1 +.+.: &ns->lock ->&dentry->d_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#6 FD: 130 BD: 2 +.+.: &sb->s_type->i_mutex_key#6 ->&sb->s_type->i_lock_key#17 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount FD: 44 BD: 1 +.+.: &type->s_umount_key#20 ->sb_lock ->&dentry->d_lock FD: 128 BD: 1 +.+.: pnp_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: subsys mutex#19 FD: 3 BD: 1 +.+.: subsys mutex#20 ->&k->k_lock FD: 3 BD: 10 +.+.: subsys mutex#21 ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#22 ->&k->k_lock FD: 346 BD: 1 +.+.: tty_mutex ->(console_sem).lock ->console_lock ->fs_reclaim ->pool_lock#2 ->tty_ldiscs_lock ->&obj_hash[i].lock ->&k->list_lock ->&k->k_lock ->&tty->legacy_mutex FD: 4 BD: 1 +.+.: subsys mutex#23 ->&k->list_lock ->&k->k_lock FD: 1 BD: 1 ....: netevent_notif_chain.lock FD: 318 BD: 12 ++++: clients_rwsem ->fs_reclaim ->clients.xa_lock ->&device->client_data_rwsem FD: 9 BD: 13 +.+.: clients.xa_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 FD: 903 BD: 11 ++++: devices_rwsem ->rcu_node_0 ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->devices.xa_lock ->&obj_hash[i].lock ->(console_sem).lock ->clients_rwsem ->rdma_nets_rwsem ->&pdata->netdev_lock ->&table->lock#4 ->devices_rwsem.wait_lock ->rdma_nets_rwsem.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock FD: 1 BD: 1 +.+.: (blocking_lsm_notifier_chain).rwsem FD: 223 BD: 72 ++++: (inetaddr_chain).rwsem ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->fib_info_lock ->&dir->lock#2 ->&____s->seqcount ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->&net->sctp.local_addr_lock ->&rq->__lock ->rlock-AF_NETLINK ->&n->list_lock ->remove_cache_srcu ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&ipvlan->addrs_lock ->&____s->seqcount#2 ->&meta->lock ->stock_lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->krc.lock ->&dir->lock FD: 1 BD: 7 ....: inet6addr_chain.lock FD: 1 BD: 1 +.+.: buses_mutex FD: 1 BD: 1 +.+.: offload_lock FD: 1 BD: 1 +...: inetsw_lock FD: 894 BD: 1 +.+.: (wq_completion)events_power_efficient ->(work_completion)(&(&tbl->managed_work)->work) ->(check_lifetime_work).work ->(work_completion)(&(&cache_cleaner)->work) ->(work_completion)(&(&ops->cursor_work)->work) ->(work_completion)(&(&hub->init_work)->work) ->(work_completion)(&(&gc_work->dwork)->work) ->(work_completion)(&(&tbl->gc_work)->work) ->(reg_check_chans).work ->(crda_timeout).work ->(gc_work).work ->(work_completion)(&(&flowtable->gc_work)->work) 
->(work_completion)(&barr->work) ->(work_completion)(&(&hinfo->gc_work)->work) ->&rq->__lock FD: 52 BD: 2 +.+.: (work_completion)(&(&tbl->managed_work)->work) ->&tbl->lock ->&rq->__lock FD: 51 BD: 3786 ++-.: &tbl->lock ->&obj_hash[i].lock ->&base->lock ->&n->lock ->pool_lock#2 ->batched_entropy_u32.lock ->nl_table_lock ->nl_table_wait.lock ->&dir->lock#2 ->krc.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->rlock-AF_NETLINK ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 76 +.+.: ptype_lock FD: 29 BD: 2 +.+.: (check_lifetime_work).work ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 72 +.+.: &net->rules_mod_lock FD: 1 BD: 1 +.+.: tcp_ulp_list_lock FD: 1 BD: 1 +...: xfrm_state_afinfo_lock FD: 1 BD: 1 +.+.: xfrm_policy_afinfo_lock FD: 1 BD: 1 +...: xfrm_input_afinfo_lock FD: 18 BD: 4580 ..-.: krc.lock ->&obj_hash[i].lock ->hrtimer_bases.lock ->&base->lock FD: 132 BD: 1 +.+.: (wq_completion)events_highpri ->(work_completion)(&(&krcp->page_cache_work)->work) ->(work_completion)(flush) ->(work_completion)(&barr->work) FD: 128 BD: 3 +.+.: (work_completion)(&(&krcp->page_cache_work)->work) ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->krc.lock FD: 1 BD: 3 +.+.: &hashinfo->lock FD: 1 BD: 1 +.+.: tcp_cong_list_lock FD: 2 BD: 7 +.+.: cache_list_lock ->&cd->hash_lock FD: 1 BD: 1 +.+.: (rpc_pipefs_notifier_list).rwsem FD: 1 BD: 1 +.+.: svc_xprt_class_lock FD: 40 BD: 1 +.+.: xprt_list_lock ->(console_sem).lock FD: 15 BD: 2 +.+.: (work_completion)(&(&cache_cleaner)->work) ->cache_list_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 1 ....: pcibios_fwaddrmap_lock FD: 136 BD: 3 .+.+: sb_writers#2 ->mount_lock ->&sb->s_type->i_mutex_key/1 ->&sb->s_type->i_mutex_key FD: 132 BD: 4 +.+.: &sb->s_type->i_mutex_key/1 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&obj_hash[i].lock ->tomoyo_ss ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#2 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&zone->lock ->&____s->seqcount ->&sb->s_type->i_mutex_key FD: 1 BD: 2 +.+.: tomoyo_log_lock FD: 1 BD: 2 ....: tomoyo_log_wait.lock FD: 68 BD: 4425 +.+.: &wb->list_lock ->&sb->s_type->i_lock_key#2 ->&sb->s_type->i_lock_key#23 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#8 ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#3 ->&sb->s_type->i_lock_key#31 ->&sb->s_type->i_lock_key#27 ->&sb->s_type->i_lock_key#16 FD: 182 BD: 4 ++++: umhelper_sem ->usermodehelper_disabled_waitq.lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->subsys mutex#80 ->fw_lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&x->wait#23 ->&base->lock ->&rq->__lock ->(&timer.timer) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount#2 ->&cfs_rq->removed.lock FD: 1 BD: 5 ....: usermodehelper_disabled_waitq.lock FD: 198 BD: 2 +.+.: (work_completion)(&sub_info->work) ->&sighand->siglock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->batched_entropy_u64.lock ->&obj_hash[i].lock ->&c->lock ->init_files.file_lock ->init_fs.lock ->&p->alloc_lock ->lock 
->pidmap_lock ->cgroup_threadgroup_rwsem ->input_pool.lock ->&p->pi_lock ->&rq->__lock ->rcu_node_0 ->&sig->wait_chldexit ->tasklist_lock ->&prev->lock ->css_set_lock ->&x->wait#17 ->&n->list_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->remove_cache_srcu ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: &drv->dynids.lock FD: 1 BD: 1 +.+.: umh_sysctl_lock FD: 68 BD: 4393 ++++: &anon_vma->rwsem ->&mm->page_table_lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 ->quarantine_lock ->&____s->seqcount ->&c->lock ->&n->list_lock ->&cfs_rq->removed.lock ->&sem->wait_lock ->rcu_node_0 ->&meta->lock ->kfence_freelist_lock ->&rcu_state.gp_wq ->&base->lock ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->ptlock_ptr(page)#2 ->stock_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 1 BD: 4986 -.-.: per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 32 BD: 4426 +.+.: lock#4 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&pcp->lock ->lock#11 FD: 271 BD: 1 +.+.: &sig->cred_guard_mutex ->fs_reclaim ->pool_lock#2 ->&fs->lock ->&zone->lock ->&____s->seqcount ->&c->lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->delayed_uprobe_lock ->&mm->mmap_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rq->__lock ->pool_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->quarantine_lock ->delayed_uprobe_lock.wait_lock ->&p->pi_lock ->&dentry->d_lock/1 ->&meta->lock ->kfence_freelist_lock ->&n->list_lock ->batched_entropy_u8.lock ->init_fs.lock ->&type->i_mutex_dir_key#3 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->aa_buffers_lock ->mapping.invalidate_lock ->&folio_wait_table[i] ->tomoyo_ss ->&iint->mutex ->binfmt_lock ->entries_lock ->&ei->xattr_sem ->&tsk->futex_exit_mutex ->&sig->exec_update_lock ->&p->alloc_lock ->tk_core.seq.seqcount ->&stopper->lock ->&stop_pi_lock ->&x->wait#8 ->key#5 ->remove_cache_srcu ->&____s->seqcount#2 FD: 2 BD: 4442 ..-.: &lruvec->lru_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 3 BD: 4419 +.+.: lock#5 ->&lruvec->lru_lock FD: 129 BD: 149 ++++: &vma->vm_lock->lock ->&rq->__lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->ptlock_ptr(page)#2 ->mmu_notifier_invalidate_range_start ->ptlock_ptr(page) ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&lruvec->lru_lock ->&c->lock ->&rcu_state.expedited_wq ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&p->pi_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock ->&rcu_state.gp_wq ->&n->list_lock ->pool_lock FD: 213 BD: 2 +.+.: &tsk->futex_exit_mutex ->&p->pi_lock ->&rq->__lock ->&mm->mmap_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->pool_lock#2 ->&rcu_state.expedited_wq FD: 28 BD: 1 +.+.: &child->perf_event_mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 149 ....: &pid->wait_pidfd FD: 29 BD: 156 ....: &sig->wait_chldexit ->&p->pi_lock FD: 15 BD: 156 ....: &(&sig->stats_lock)->lock ->&____s->seqcount#5 FD: 14 BD: 157 ....: &____s->seqcount#5 ->pidmap_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 40 BD: 1 +.+.: low_water_lock ->(console_sem).lock ->console_owner_lock ->console_owner FD: 287 BD: 1 +.+.: vendor_module_lock ->slab_mutex ->pcpu_alloc_mutex ->pool_lock#2 ->&obj_hash[i].lock ->percpu_counters_lock ->fs_reclaim ->shrinker_rwsem ->&____s->seqcount ->&zone->lock ->cpu_hotplug_lock ->timekeeper_lock FD: 1 BD: 5033 -.-.: 
pvclock_gtod_data FD: 13 BD: 1 +.-.: (&tcp_orphan_timer) ->&obj_hash[i].lock ->&base->lock FD: 31 BD: 1 ..-.: &(&cache_cleaner)->timer FD: 138 BD: 3 ++++: &type->i_mutex_dir_key#2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->namespace_sem ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#5 ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&____s->seqcount ->&sem->wait_lock ->&rq->__lock ->remove_cache_srcu ->&xa->xa_lock#4 ->&obj_hash[i].lock ->stock_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->batched_entropy_u32.lock FD: 3 BD: 146 +.+.: subsys mutex#24 ->&k->k_lock FD: 3 BD: 146 +.+.: subsys mutex#25 ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#26 FD: 188 BD: 1 +.+.: subsys mutex#27 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->platform_devid_ida.xa_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->&c->lock ->&____s->seqcount ->subsys mutex#3 ->wakeup_ida.xa_lock ->gdp_mutex ->subsys mutex#15 ->events_lock ->rtcdev_lock FD: 1 BD: 1 +.+.: subsys mutex#28 FD: 38 BD: 2 +.+.: (work_completion)(&p->wq) ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&base->lock ->&meta->lock ->kfence_freelist_lock ->&rcu_state.expedited_wq ->quarantine_lock FD: 31 BD: 1 ..-.: &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer FD: 31 BD: 1 ..-.: &(&group->avgs_work)->timer FD: 33 BD: 1 ..-.: mm/memcontrol.c:589 FD: 29 BD: 2 +.+.: (stats_flush_dwork).work ->cgroup_rstat_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 2 BD: 18 ....: cgroup_rstat_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 29 BD: 2 +.+.: (work_completion)(&(&group->avgs_work)->work) ->&group->avgs_lock ->&rq->__lock FD: 28 BD: 3 +.+.: &group->avgs_lock ->&per_cpu_ptr(group->pcpu, cpu)->seq ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 147 +.+.: subsys mutex#29 FD: 1 BD: 4 +.+.: key_user_lock FD: 1 BD: 4 +.+.: key_serial_lock FD: 5 BD: 5 +.+.: key_construction_mutex ->&obj_hash[i].lock ->pool_lock#2 ->keyring_name_lock FD: 135 BD: 3 +.+.: &type->lock_class ->keyring_serialise_link_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->key_user_lock ->crngs.lock ->key_serial_lock ->key_construction_mutex ->ima_keys_lock FD: 131 BD: 4 +.+.: keyring_serialise_link_lock ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->root_key_user.lock ->key_construction_mutex FD: 29 BD: 4403 ....: &pgdat->kswapd_wait ->&p->pi_lock FD: 1 BD: 1 +.+.: drivers_lock FD: 137 BD: 1 +.+.: damon_dbgfs_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->damon_ops_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 140 BD: 1 +.+.: &type->s_umount_key#21/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#18 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#18 ->&dentry->d_lock FD: 1 BD: 1 +.+.: dq_list_lock FD: 140 
BD: 1 +.+.: &type->s_umount_key#22/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 7 +.+.: &sb->s_type->i_lock_key#19 ->&dentry->d_lock FD: 1 BD: 1 +.+.: configfs_subsystem_mutex FD: 137 BD: 1 +.+.: &sb->s_type->i_mutex_key#7/1 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&____s->seqcount ->&c->lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]/2 ->&default_group_class[depth - 1]#2 ->&zone->lock FD: 1 BD: 8 +.+.: configfs_dirent_lock FD: 135 BD: 2 +.+.: &default_group_class[depth - 1]/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#3/2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 128 BD: 1 +.+.: ecryptfs_daemon_hash_mux ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 ....: &ecryptfs_kthread_ctl.wait FD: 2 BD: 1 +.+.: ecryptfs_msg_ctx_lists_mux ->&ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 2 +.+.: &ecryptfs_msg_ctx_arr[i].mux FD: 1 BD: 1 +.+.: nfs_version_lock FD: 144 BD: 1 ++++: key_types_sem ->(console_sem).lock ->asymmetric_key_parsers_sem ->&type->lock_class ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 1 +.+.: pnfs_spinlock FD: 1 BD: 5 +.+.: &sn->pipefs_sb_lock FD: 1 BD: 1 +.+.: nls_lock FD: 1 BD: 1 +.+.: jffs2_compressor_list_lock FD: 1 BD: 1 +.+.: next_tag_value_lock FD: 1 BD: 1 ....: log_redrive_lock FD: 2 BD: 1 ....: &TxAnchor.LazyLock ->jfs_commit_thread_wait.lock FD: 1 BD: 2 ....: jfs_commit_thread_wait.lock FD: 1 BD: 1 +.+.: jfsTxnLock FD: 40 BD: 1 +.+.: ocfs2_stack_lock ->(console_sem).lock FD: 1 BD: 1 +.+.: o2hb_callback_sem FD: 1 BD: 1 +.+.: o2net_handler_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#23/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&zone->lock ->&____s->seqcount ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock ->&n->list_lock ->&xa->xa_lock#4 ->&obj_hash[i].lock ->stock_lock ->&rq->__lock FD: 42 BD: 5 +.+.: &sb->s_type->i_lock_key#20 ->&dentry->d_lock FD: 284 BD: 86 +.+.: nf_hook_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&____s->seqcount#2 ->&obj_hash[i].lock ->&rq->__lock ->&cfs_rq->removed.lock ->cpu_hotplug_lock ->remove_cache_srcu ->&n->list_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 128 BD: 1 ++++: alg_types_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: dma_list_mutex FD: 135 BD: 2 ++++: asymmetric_key_parsers_sem ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount ->crypto_alg_sem ->&obj_hash[i].lock ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->&rq->__lock ->(&timer.timer) FD: 880 BD: 1 +.+.: blkcg_pol_register_mutex ->blkcg_pol_mutex ->cgroup_mutex FD: 1 BD: 4 +.+.: elv_list_lock FD: 133 BD: 3 +.+.: crc_t10dif_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 133 BD: 1 +.+.: crc64_rocksoft_mutex ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ts_mod_lock FD: 3 BD: 7 +.+.: subsys mutex#30 ->&k->k_lock FD: 44 BD: 10 +.+.: &dev->mutex#2 
->&obj_hash[i].lock ->&rnp->exp_wq[3] ->&rq->__lock ->&rnp->exp_lock ->&rnp->exp_wq[0] ->rcu_state.exp_mutex FD: 32 BD: 5 ....: wakeup_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) FD: 1 BD: 3 ....: wakeup_srcu FD: 1 BD: 3 ....: (&ws->timer) FD: 1 BD: 292 +.+.: klist_remove_lock FD: 5 BD: 4084 ....: &ws->lock ->tk_core.seq.seqcount ->&obj_hash[i].lock FD: 1 BD: 3 ....: deleted_ws.lock FD: 164 BD: 1 +.+.: register_count_mutex ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock FD: 1 BD: 1 +.+.: (cpufreq_policy_notifier_list).rwsem FD: 1 BD: 1 +.+.: cpuidle_driver_lock FD: 1 BD: 1 ....: thermal_cdev_ida.xa_lock FD: 1 BD: 4 ....: cpufreq_driver_lock FD: 3 BD: 1 +.+.: subsys mutex#31 ->&k->k_lock FD: 1 BD: 1 +.+.: (x86_mce_decoder_chain).rwsem FD: 1 BD: 1 ....: virtio_index_ida.xa_lock FD: 1 BD: 1 +.+.: subsys mutex#32 FD: 170 BD: 144 +.+.: &md->mutex ->fs_reclaim ->pool_lock#2 ->irq_domain_mutex ->pci_config_lock ->&xa->xa_lock#5 ->&domain->mutex ->&irq_desc_lock_class ->vector_lock ->&root->kernfs_rwsem ->lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 2 BD: 145 +.+.: &xa->xa_lock#5 ->pool_lock#2 FD: 1 BD: 1 +.+.: &dev->vqs_list_lock FD: 1 BD: 1 ....: &vp_dev->lock FD: 1 BD: 1 +.+.: (oom_notify_list).rwsem FD: 1 BD: 1 ....: &dev->config_lock FD: 1 BD: 6 ++++: vdpa_dev_lock FD: 3 BD: 1 +.+.: subsys mutex#33 ->&k->k_lock FD: 30 BD: 4489 +.+.: &dentry->d_lock/1 ->&lru->node[i].lock FD: 31 BD: 1 -.-.: &vb->stop_update_lock FD: 285 BD: 1 +.+.: (wq_completion)events_freezable ->(work_completion)(&vb->update_balloon_stats_work) FD: 284 BD: 2 +.+.: (work_completion)(&vb->update_balloon_stats_work) ->cpu_hotplug_lock ->&s->s_inode_list_lock ->&rq->__lock FD: 1 BD: 225 +.+.: pcpu_alloc_mutex.wait_lock FD: 1 BD: 4 +.+.: delayed_uprobe_lock.wait_lock FD: 261 BD: 1 +.+.: serial_mutex ->gpio_lookup_lock ->port_mutex FD: 1 BD: 2 +.+.: gpio_lookup_lock FD: 259 BD: 2 +.+.: port_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&zone->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&k->k_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#34 ->&rq->__lock ->&port->mutex FD: 1 BD: 3 +.+.: subsys mutex#34 FD: 1 BD: 4478 ....: &dev->power.wait_queue FD: 38 BD: 1 +.+.: (wq_completion)pm ->(work_completion)(&dev->power.work) FD: 37 BD: 2 +.+.: (work_completion)(&dev->power.work) ->&dev->power.lock ->&port_lock_key FD: 254 BD: 9 +.+.: &port->mutex ->fs_reclaim ->pool_lock#2 ->console_mutex ->resource_lock ->&port_lock_key ->(console_sem).lock ->ctrl_ida.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&dev->power.lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->subsys mutex#35 ->semaphore->lock ->*(&acpi_gbl_reference_count_lock) ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->&cfs_rq->removed.lock ->gdp_mutex ->req_lock ->&p->pi_lock ->&x->wait#11 ->subsys mutex#21 ->chrdevs_lock ->hash_mutex ->&i->lock ->&desc->request_mutex ->register_lock ->&irq_desc_lock_class ->proc_subdir_lock ->proc_inum_ida.xa_lock FD: 1 BD: 10 ....: 
ctrl_ida.xa_lock FD: 1 BD: 10 +.+.: subsys mutex#35 FD: 1 BD: 1 ....: rng_index_ida.xa_lock FD: 131 BD: 1 +.+.: rng_mutex ->&x->wait#13 ->fs_reclaim ->pool_lock#2 ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock FD: 29 BD: 2 -.-.: &x->wait#12 ->&p->pi_lock FD: 1 BD: 2 ....: &x->wait#13 FD: 31 BD: 1 +.+.: reading_mutex ->reading_mutex.wait_lock ->&x->wait#12 ->&rq->__lock FD: 1 BD: 2 +.+.: reading_mutex.wait_lock FD: 1 BD: 1 ....: &dev->managed.lock FD: 140 BD: 1 +.+.: &type->s_umount_key#24/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#21 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#21 ->&dentry->d_lock FD: 2 BD: 248 ....: drm_minor_lock ->pool_lock#2 FD: 28 BD: 1 +.+.: &dev->debugfs_mutex ->&rq->__lock FD: 3 BD: 3 +.+.: subsys mutex#36 ->&k->k_lock FD: 128 BD: 24 +.+.: &dev->mode_config.idr_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 ....: (worker)->lock FD: 153 BD: 20 +.+.: crtc_ww_class_acquire ->crtc_ww_class_mutex ->fs_reclaim ->&____s->seqcount ->&zone->lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 152 BD: 21 +.+.: crtc_ww_class_mutex ->reservation_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.blob_lock ->&crtc->commit_lock ->reservation_ww_class_mutex ->tk_core.seq.seqcount ->&vkms_out->lock ->&dev->vbl_lock ->&x->wait#14 ->(work_completion)(&vkms_state->composer_work) ->&base->lock ->&rq->__lock ->(&timer.timer) ->(work_completion)(&vkms_state->composer_work)#2 ->&zone->lock FD: 1 BD: 22 +.+.: &dev->mode_config.blob_lock FD: 1 BD: 1 ....: &xa->xa_lock#6 FD: 1 BD: 1 ....: &xa->xa_lock#7 FD: 1 BD: 23 ....: &dev->mode_config.connector_list_lock FD: 20 BD: 25 ..-.: &dev->vbl_lock ->&dev->vblank_time_lock FD: 179 BD: 1 .+.+: drm_connector_list_iter ->&dev->mode_config.connector_list_lock ->fs_reclaim ->pool_lock#2 ->&connector->mutex FD: 178 BD: 2 +.+.: &connector->mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&k->k_lock ->subsys mutex#36 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&dev->mode_config.idr_mutex ->connector_list_lock FD: 1 BD: 3 +.+.: connector_list_lock FD: 1 BD: 1 +.+.: &dev->filelist_mutex FD: 225 BD: 1 +.+.: &dev->clientlist_mutex ->&helper->lock ->registration_lock ->(console_sem).lock ->kernel_fb_helper_lock FD: 183 BD: 16 +.+.: &helper->lock ->fs_reclaim ->pool_lock#2 ->&client->modeset_mutex ->&obj_hash[i].lock ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&mgr->vm_lock ->&dev->object_name_lock ->&node->vm_lock ->&file_private->table_lock ->&dev->mode_config.idr_mutex ->&dev->mode_config.fb_lock ->&file->fbs_lock ->&prime_fpriv->lock ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&____s->seqcount ->init_mm.page_table_lock ->&dev->master_mutex ->&rq->__lock ->&lock->wait_lock ->&pool->lock ->reservation_ww_class_mutex FD: 155 BD: 18 +.+.: &client->modeset_mutex ->&dev->mode_config.mutex ->fs_reclaim ->pool_lock#2 ->crtc_ww_class_acquire FD: 154 BD: 19 +.+.: &dev->mode_config.mutex 
->crtc_ww_class_acquire ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 17 +.+.: &mgr->vm_lock ->pool_lock#2 FD: 36 BD: 17 +.+.: &dev->object_name_lock ->lock FD: 4 BD: 248 +.+.: &file_private->table_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 4 BD: 17 +.+.: &node->vm_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 17 +.+.: &dev->mode_config.fb_lock FD: 1 BD: 17 +.+.: &file->fbs_lock FD: 1 BD: 17 +.+.: &prime_fpriv->lock FD: 223 BD: 2 +.+.: registration_lock ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#11 ->vt_switch_mutex ->(console_sem).lock ->console_lock FD: 128 BD: 3 +.+.: vt_switch_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 14 +.+.: &fb_info->lock FD: 156 BD: 17 +.+.: &dev->master_mutex ->&client->modeset_mutex FD: 1 BD: 22 +.+.: &crtc->commit_lock FD: 134 BD: 165 +.+.: &shmem->vmap_lock ->&shmem->pages_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock FD: 130 BD: 166 +.+.: &shmem->pages_lock ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#8 ->lock#4 ->&info->lock FD: 42 BD: 4446 ..-.: &xa->xa_lock#8 ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->key#10 ->&s->s_inode_wblist_lock ->&base->lock ->key#12 ->&wb->work_lock ->&n->list_lock ->key#13 ->&pl->lock ->stock_lock ->&xa->xa_lock#4 ->&____s->seqcount#2 ->key#28 FD: 2 BD: 173 ....: &info->lock ->key#9 FD: 36 BD: 22 -.-.: &vkms_out->lock ->&dev->event_lock FD: 35 BD: 23 -.-.: &dev->event_lock ->&dev->vbl_lock ->&____s->seqcount#6 ->&x->wait#14 ->&obj_hash[i].lock ->pool_lock#2 ->&dev->vblank_time_lock ->&vblank->queue ->&base->lock FD: 1 BD: 28 ----: &____s->seqcount#6 FD: 29 BD: 24 -...: &x->wait#14 ->&p->pi_lock FD: 19 BD: 26 -.-.: &dev->vblank_time_lock ->tk_core.seq.seqcount ->&(&vblank->seqlock)->lock ->&obj_hash[i].lock ->hrtimer_bases.lock FD: 2 BD: 27 -.-.: &(&vblank->seqlock)->lock ->&____s->seqcount#6 FD: 1 BD: 22 +.+.: (work_completion)(&vkms_state->composer_work) FD: 1 BD: 18 ....: &helper->damage_lock FD: 185 BD: 2 +.+.: (work_completion)(&helper->damage_work) ->&helper->damage_lock ->&helper->lock FD: 1 BD: 3901 +.+.: &lock->wait_lock FD: 1 BD: 24 -.-.: &vblank->queue FD: 1 BD: 22 +.+.: (work_completion)(&vkms_state->composer_work)#2 FD: 1 BD: 14 ....: vt_event_lock FD: 1 BD: 2 +.+.: kernel_fb_helper_lock FD: 1 BD: 1 +...: &dev->queue_lock FD: 1 BD: 8 ....: blk_queue_ida.xa_lock FD: 9 BD: 6 +.+.: &xa->xa_lock#9 ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 FD: 22 BD: 309 ....: &q->queue_lock ->&blkcg->lock ->pool_lock#2 ->pcpu_lock ->&obj_hash[i].lock ->percpu_counters_lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 20 BD: 310 ....: &blkcg->lock ->pool_lock#2 ->percpu_ref_switch_lock ->(&sq->pending_timer) ->&obj_hash[i].lock ->&base->lock ->percpu_counters_lock ->pcpu_lock ->pool_lock ->&c->lock FD: 298 BD: 8 +.+.: &q->sysfs_lock ->&q->unused_hctx_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&zone->lock ->&____s->seqcount ->cpu_hotplug_lock ->fs_reclaim ->&xa->xa_lock#10 ->&q->debugfs_mutex ->pcpu_alloc_mutex ->&q->rq_qos_mutex 
->&stats->lock ->&rq->__lock ->lock ->&root->kernfs_rwsem ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 9 +.+.: &q->unused_hctx_lock FD: 1 BD: 13 +.+.: &bdev->bd_size_lock FD: 2 BD: 11 +.+.: &xa->xa_lock#10 ->pool_lock#2 FD: 31 BD: 5 +.+.: &set->tag_list_lock ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&rq->__lock FD: 30 BD: 11 +.+.: &q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->mq_freeze_wq ->&rq->__lock FD: 5 BD: 316 ..-.: percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 317 ....: &q->mq_freeze_wq FD: 3 BD: 6 +.+.: subsys mutex#37 ->&k->k_lock FD: 299 BD: 6 +.+.: &q->sysfs_dir_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&c->lock ->&zone->lock ->&____s->seqcount ->&q->sysfs_lock ->&rq->__lock ->&obj_hash[i].lock FD: 136 BD: 10 +.+.: &q->debugfs_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 FD: 3 BD: 5 +.+.: subsys mutex#38 ->&k->k_lock FD: 1 BD: 5 ....: cgwb_lock FD: 1 BD: 5 +...: bdi_lock FD: 63 BD: 4417 +.+.: inode_hash_lock ->&sb->s_type->i_lock_key#3 ->&sb->s_type->i_lock_key#22 ->&s->s_inode_list_lock ->&sb->s_type->i_lock_key#24 ->&sb->s_type->i_lock_key#30 ->&sb->s_type->i_lock_key#31 FD: 2 BD: 4 +.+.: bdev_lock ->&bdev->bd_holder_lock FD: 320 BD: 3 +.+.: &disk->open_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock ->&xa->xa_lock#8 ->lock#4 ->mmu_notifier_invalidate_range_start ->&c->lock ->&mapping->private_lock ->tk_core.seq.seqcount ->&ret->b_uptodate_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->&rq->__lock ->&base->lock ->&hctx->lock ->&x->wait#16 ->&cfs_rq->removed.lock ->(&timer.timer) ->&q->sysfs_dir_lock ->rcu_node_0 ->&bdev->bd_size_lock ->&dd->lock ->&folio_wait_table[i] ->(console_sem).lock ->&s->s_inode_list_lock ->pcpu_alloc_mutex ->&x->wait#9 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&k->k_lock ->subsys mutex#37 ->&xa->xa_lock#9 ->inode_hash_lock ->bdev_lock ->&lo->lo_mutex ->nbd_index_mutex ->&nbd->config_lock ->&new->lock ->&lock->wait_lock FD: 44 BD: 4420 +.+.: &mapping->private_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&xa->xa_lock#8 FD: 30 BD: 6 ..-.: &ret->b_uptodate_lock ->bit_wait_table + i FD: 13 BD: 5 ....: floppy_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 1 ..-.: percpu_ref_switch_waitq.lock FD: 29 BD: 4 ....: command_done.lock ->&p->pi_lock FD: 16 BD: 2 +.+.: floppy_work ->dma_spin_lock ->floppy_lock ->&obj_hash[i].lock ->fdc_wait.lock FD: 1 BD: 3 ....: dma_spin_lock FD: 128 BD: 1 +.+.: loop_ctl_mutex ->fs_reclaim ->pool_lock#2 FD: 141 BD: 9 +.+.: &q->rq_qos_mutex ->&q->mq_freeze_lock ->percpu_ref_switch_lock ->&q->debugfs_mutex ->set->srcu FD: 1 BD: 9 ....: &stats->lock FD: 31 BD: 1 ..-.: &(&ops->cursor_work)->timer FD: 33 BD: 2 +.+.: (work_completion)(&(&ops->cursor_work)->work) ->(console_sem).lock ->&obj_hash[i].lock ->&base->lock FD: 138 BD: 9 +.+.: nbd_index_mutex ->fs_reclaim ->pool_lock#2 ->&nbd->config_lock FD: 1 BD: 17 .+.+: set->srcu FD: 35 BD: 6 +.+.: (work_completion)(&(&q->requeue_work)->work) ->&q->requeue_lock ->&hctx->lock ->&dd->lock FD: 30 BD: 6 +.+.: (work_completion)(&(&hctx->run_work)->work) ->&rq->__lock FD: 311 BD: 1 +.+.: zram_index_mutex ->fs_reclaim ->pool_lock#2 ->blk_queue_ida.xa_lock ->&obj_hash[i].lock ->pcpu_alloc_mutex ->bio_slab_lock ->&c->lock ->&____s->seqcount ->percpu_counters_lock 
->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->lock ->&q->queue_lock ->&x->wait#9 ->&bdev->bd_size_lock ->&k->list_lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->subsys mutex#37 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->percpu_ref_switch_lock ->&zone->lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#38 ->cgwb_lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->bdi_lock ->inode_hash_lock ->(console_sem).lock FD: 3 BD: 1 +.+.: subsys mutex#39 ->&k->k_lock FD: 129 BD: 2 +.+.: &default_group_class[depth - 1]#2 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 2 BD: 1 +.+.: &lock ->nullb_indexes.xa_lock FD: 1 BD: 2 ....: nullb_indexes.xa_lock FD: 1 BD: 1 +.+.: ctx_list.lock FD: 1 BD: 1 ....: nfc_index_ida.xa_lock FD: 172 BD: 3 +.+.: nfc_devlist_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&zone->lock ->&____s->seqcount ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&rq->__lock ->subsys mutex#40 ->&k->k_lock ->&genl_data->genl_data_mutex FD: 3 BD: 4 +.+.: subsys mutex#40 ->&k->k_lock FD: 1 BD: 79 ....: &rfkill->lock FD: 3 BD: 11 +.+.: subsys mutex#41 ->&k->k_lock FD: 177 BD: 2 +.+.: (work_completion)(&rfkill->sync_work) ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 11 +.+.: rfkill_global_mutex.wait_lock FD: 1 BD: 1 +.+.: dma_heap_minors.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#42 ->&k->k_lock FD: 1 BD: 1 +.+.: heap_list_lock FD: 31 BD: 1 ..-.: &(&krcp->monitor_work)->timer FD: 31 BD: 1 ..-.: &(&tbl->managed_work)->timer FD: 35 BD: 2 +.+.: (work_completion)(&(&krcp->monitor_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 ....: host_index_ida.xa_lock FD: 158 BD: 1 +.+.: scsi_sense_cache_mutex ->slab_mutex FD: 28 BD: 4 +.+.: subsys mutex#43 ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#44 ->&k->k_lock FD: 1 BD: 176 -.-.: &virtscsi_vq->vq_lock FD: 331 BD: 3 +.+.: &shost->scan_mutex ->fs_reclaim ->pool_lock#2 ->shost->host_lock ->&dev->power.lock ->&x->wait#9 ->&obj_hash[i].lock ->attribute_container_mutex ->blk_queue_ida.xa_lock ->pcpu_alloc_mutex ->&q->sysfs_lock ->&set->tag_list_lock ->batched_entropy_u32.lock ->&c->lock ->&zone->lock ->&____s->seqcount ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&hctx->lock ->&base->lock ->&x->wait#16 ->&rq->__lock ->(&timer.timer) ->&sdev->state_mutex ->&q->mq_freeze_lock ->&q->mq_freeze_wq ->percpu_ref_switch_lock ->(&q->timeout) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->cpu_hotplug_lock ->&xa->xa_lock#10 ->&q->unused_hctx_lock ->(work_completion)(&sdev->requeue_work) ->(work_completion)(&sdev->event_work) ->pcpu_lock ->&sdev->inquiry_mutex ->quarantine_lock ->(console_sem).lock ->&tags->lock ->&cfs_rq->removed.lock ->&meta->lock ->kfence_freelist_lock ->&x->wait#15 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#43 ->device_links_srcu ->async_lock ->gdp_mutex ->subsys mutex#45 ->bsg_minor_ida.xa_lock ->chrdevs_lock ->req_lock ->&p->pi_lock ->&x->wait#11 ->subsys mutex#58 FD: 1 BD: 4 
....: shost->host_lock FD: 2 BD: 3 +.+.: async_scan_lock ->&x->wait#15 FD: 1 BD: 5 ....: &x->wait#15 FD: 1 BD: 166 +.+.: &hctx->lock FD: 29 BD: 5 ..-.: &x->wait#16 ->&p->pi_lock FD: 1 BD: 4 +.+.: &sdev->state_mutex FD: 31 BD: 4 +.-.: (&q->timeout) FD: 14 BD: 5 +.+.: (work_completion)(&q->timeout_work) ->&tags->lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4 +.+.: (work_completion)(&sdev->requeue_work) FD: 1 BD: 4 +.+.: (work_completion)(&sdev->event_work) FD: 1 BD: 4 +.+.: &sdev->inquiry_mutex FD: 175 BD: 4 +.+.: subsys mutex#45 ->&k->list_lock ->&k->k_lock ->fs_reclaim ->pool_lock#2 ->lock ->chrdevs_lock ->&c->lock ->&zone->lock ->&rq->__lock ->&____s->seqcount ->&x->wait#9 ->&obj_hash[i].lock ->gdp_mutex ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#57 ->(console_sem).lock FD: 1 BD: 6 ....: &tags->lock FD: 1 BD: 1 +.+.: nvmf_hosts_mutex FD: 3 BD: 1 +.+.: subsys mutex#46 ->&k->k_lock FD: 1 BD: 1 +.+.: nvmf_transports_rwsem FD: 3 BD: 1 +.+.: subsys mutex#47 ->&k->k_lock FD: 134 BD: 3 +.+.: &default_group_class[depth - 1]#3/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#4/2 FD: 1 BD: 1 +.+.: nvmet_config_sem FD: 3 BD: 1 +.+.: subsys mutex#48 ->&k->k_lock FD: 133 BD: 4 +.+.: &default_group_class[depth - 1]#4/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#5/2 FD: 132 BD: 5 +.+.: &default_group_class[depth - 1]#5/2 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->configfs_dirent_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#19 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&default_group_class[depth - 1]#6 ->&default_group_class[depth - 1]#6/2 FD: 129 BD: 6 +.+.: &default_group_class[depth - 1]#6 ->fs_reclaim ->pool_lock#2 ->configfs_dirent_lock FD: 1 BD: 6 +.+.: &default_group_class[depth - 1]#6/2 FD: 1 BD: 1 +.+.: backend_mutex FD: 1 BD: 1 +.+.: scsi_mib_index_lock FD: 1 BD: 1 +.+.: hba_lock FD: 128 BD: 1 +.+.: device_mutex ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: &hba->device_lock FD: 325 BD: 1 +.+.: mtd_table_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&rq->__lock ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#49 ->devtree_lock ->nvmem_ida.xa_lock ->nvmem_cell_mutex ->&k->k_lock ->&zone->lock ->subsys mutex#50 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->(console_sem).lock ->pcpu_alloc_mutex ->cpu_hotplug_lock ->batched_entropy_u32.lock ->mmu_notifier_invalidate_range_start ->blk_queue_ida.xa_lock ->&q->sysfs_lock ->&set->tag_list_lock ->bio_slab_lock ->percpu_counters_lock ->&sb->s_type->i_lock_key#3 ->&s->s_inode_list_lock ->&xa->xa_lock#9 ->&q->mq_freeze_lock ->set->srcu ->percpu_ref_switch_lock ->&q->queue_lock ->&bdev->bd_size_lock ->elv_list_lock ->(work_completion)(&(&q->requeue_work)->work) ->(work_completion)(&(&hctx->run_work)->work) ->&n->list_lock ->&q->debugfs_mutex ->subsys 
mutex#37 ->dev_hotplug_mutex ->&q->sysfs_dir_lock ->subsys mutex#38 ->cgwb_lock ->bdi_lock ->inode_hash_lock FD: 1 BD: 1 +.+.: part_parser_lock FD: 1 BD: 80 ....: (kmod_concurrent_max).lock FD: 29 BD: 81 ....: &x->wait#17 ->&p->pi_lock FD: 1 BD: 156 ....: &prev->lock FD: 3 BD: 2 +.+.: subsys mutex#49 ->&k->k_lock FD: 1 BD: 2 ....: nvmem_ida.xa_lock FD: 1 BD: 2 +.+.: nvmem_cell_mutex FD: 1 BD: 2 +.+.: subsys mutex#50 FD: 906 BD: 1 +.+.: (wq_completion)gid-cache-wq ->(work_completion)(&ndev_work->work) ->(work_completion)(&work->work) ->&rq->__lock FD: 1 BD: 72 +.+.: &bond->stats_lock FD: 904 BD: 2 +.+.: (work_completion)(&ndev_work->work) ->devices_rwsem ->&obj_hash[i].lock ->pool_lock#2 ->&base->lock ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 16 BD: 94 ....: lweventlist_lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 871 BD: 2 +.+.: (linkwatch_work).work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->stock_lock FD: 1 BD: 3774 +.+.: rtnl_mutex.wait_lock FD: 3 BD: 77 ..-.: once_lock ->crngs.lock FD: 284 BD: 2 +.+.: (work_completion)(&w->work) ->cpu_hotplug_lock ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock FD: 28 BD: 76 ++++: (inet6addr_validator_chain).rwsem ->&rq->__lock FD: 28 BD: 72 ++++: (inetaddr_validator_chain).rwsem ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#51 ->&k->k_lock FD: 1 BD: 1 +.+.: mdio_board_lock FD: 1 BD: 75 +.+.: mode_list_lock FD: 1 BD: 73 +.+.: napi_hash_lock FD: 132 BD: 144 +.+.: xps_map_mutex ->fs_reclaim ->pool_lock#2 ->jump_label_mutex ->&obj_hash[i].lock ->krc.lock ->&rq->__lock FD: 1 BD: 2 +.+.: (work_completion)(&vi->config_work) FD: 1 BD: 1 +.+.: l3mdev_lock FD: 3 BD: 1 +.+.: subsys mutex#52 ->&k->k_lock FD: 6 BD: 1 +.+.: compressor_list_lock ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 1 BD: 5 ....: hwsim_netgroup_ida.xa_lock FD: 34 BD: 3800 +.-.: hwsim_radio_lock ->pool_lock#2 ->&list->lock#16 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount FD: 3 BD: 6 +.+.: subsys mutex#53 ->&k->k_lock FD: 324 BD: 74 +.+.: &rdev->wiphy.mtx ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#54 ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->nl_table_lock ->nl_table_wait.lock ->reg_requests_lock ->stack_depot_init_mutex ->pcpu_alloc_mutex ->&local->iflist_mtx ->&xa->xa_lock#3 ->net_rwsem ->&x->wait#9 ->subsys mutex#17 ->&dir->lock#2 ->dev_hotplug_mutex ->dev_base_lock ->input_pool.lock ->batched_entropy_u32.lock ->&tbl->lock ->sysctl_lock ->&wdev->mtx ->&fq->lock ->rlock-AF_NETLINK ->lweventlist_lock ->&pool->lock ->rcu_node_0 ->&rq->__lock ->&data->mutex ->&base->lock ->&n->list_lock ->&tn->lock ->failover_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&idev->mc_lock ->&pnettable->lock ->smc_ib_devices.mutex ->&wdev->event_lock ->&rdev->mgmt_registrations_lock ->(work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) ->&local->key_mtx ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock 
->&xa->xa_lock#8 ->mount_lock ->&rdev->wiphy_work_lock ->(&dwork->timer) ->(work_completion)(&(&link->color_collision_detect_work)->work) ->&local->chanctx_mtx ->rtnl_mutex.wait_lock ->&p->pi_lock ->&lock->wait_lock ->&list->lock#15 ->lock#6 ->&____s->seqcount#2 ->quarantine_lock ->stock_lock ->uevent_sock_mutex.wait_lock ->&local->mtx ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&local->queue_stop_reason_lock ->&local->sta_mtx ->_xmit_ETHER ->(&local->dynamic_ps_timer) ->(work_completion)(&local->dynamic_ps_enable_work) ->(work_completion)(&sdata->recalc_smps) ->(work_completion)(&link->csa_finalize_work) ->(work_completion)(&link->color_change_finalize_work) ->(work_completion)(&(&link->dfs_cac_timer_work)->work) ->&local->filter_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&sem->wait_lock ->pcpu_lock ->krc.lock ->kernfs_rename_lock ->&sb->s_type->i_mutex_key#3/1 FD: 3 BD: 75 +.+.: subsys mutex#54 ->&k->k_lock FD: 1 BD: 75 +.+.: reg_requests_lock FD: 1 BD: 75 +.+.: &local->iflist_mtx FD: 177 BD: 78 +.+.: &wdev->mtx ->&rdev->bss_lock ->&local->chanctx_mtx ->&rdev->wiphy_work_lock ->&rq->__lock ->&ifibss->incomplete_lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&local->mtx ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hrtimer_bases.lock ->&obj_hash[i].lock ->&base->lock ->&wdev->event_lock ->&c->lock ->nl_table_lock ->nl_table_wait.lock ->&list->lock#2 ->&____s->seqcount ->&zone->lock ->&sta->lock ->&local->sta_mtx ->&cfs_rq->removed.lock ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&n->list_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->lweventlist_lock ->krc.lock ->&list->lock#15 ->(&ifibss->timer) ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&lock->wait_lock FD: 5 BD: 3798 +.-.: &fq->lock ->tk_core.seq.seqcount FD: 3 BD: 72 +.+.: subsys mutex#55 ->&k->k_lock FD: 129 BD: 73 +.+.: &sdata->sec_mtx ->&sec->lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 141 ++..: &sec->lock FD: 1 BD: 72 +.+.: &local->iflist_mtx#2 FD: 128 BD: 1 +.+.: hwsim_phys_lock ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&zone->lock ->&obj_hash[i].lock FD: 1 BD: 3 ....: sd_index_ida.xa_lock FD: 3 BD: 3 +.+.: subsys mutex#56 ->&k->k_lock FD: 2 BD: 248 ....: sg_index_lock ->pool_lock#2 FD: 3 BD: 5 +.+.: subsys mutex#57 ->&k->k_lock FD: 1 BD: 4 ....: bsg_minor_ida.xa_lock FD: 128 BD: 1 +.+.: xdomain_lock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: ioctl_mutex FD: 1 BD: 1 +.+.: address_handler_list_lock FD: 1 BD: 1 +.+.: card_mutex FD: 3 BD: 4 +.+.: subsys mutex#58 ->&k->k_lock FD: 3 BD: 1 +.+.: subsys mutex#59 ->&k->k_lock FD: 29 BD: 1 ....: &x->wait#18 ->&p->pi_lock FD: 1 BD: 181 +.+.: &dd->lock FD: 29 BD: 4399 ..-.: &folio_wait_table[i] ->&p->pi_lock FD: 39 BD: 1 +.+.: (wq_completion)kblockd ->(work_completion)(&(&hctx->run_work)->work) ->(work_completion)(&q->timeout_work) ->(work_completion)(&(&q->requeue_work)->work) FD: 31 BD: 2 ..-.: &txlock ->&list->lock#3 ->&txwq FD: 1 BD: 3 ..-.: &list->lock#3 FD: 29 BD: 3 ..-.: &txwq ->&p->pi_lock FD: 2 BD: 1 ....: &iocq[i].lock ->&ktiowq[i] FD: 1 BD: 2 ....: &ktiowq[i] FD: 1 BD: 1 ....: rcu_read_lock_bh FD: 1 BD: 3732 +.-.: noop_qdisc.q.lock FD: 3 BD: 3 +.+.: subsys mutex#60 ->&k->k_lock FD: 218 BD: 1 +.+.: usb_bus_idr_lock ->(usb_notifier_list).rwsem ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&dev->power.lock 
->device_links_srcu ->&c->lock ->&____s->seqcount ->&zone->lock ->(console_sem).lock ->input_pool.lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&x->wait#11 ->&(&priv->bus_notifier)->rwsem ->uevent_sock_mutex ->running_helpers_waitq.lock ->device_state_lock ->&dum_hcd->dum->lock ->subsys mutex#61 ->&x->wait#9 ->&lock->wait_lock ->&hub->irq_urb_lock ->(&hub->irq_urb_retry) ->&base->lock ->hcd_urb_unlink_lock ->(work_completion)(&hub->tt.clear_work) ->hcd_urb_list_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&vhci_hcd->vhci->lock ->quarantine_lock FD: 164 BD: 1 +.+.: table_lock ->&k->list_lock ->fs_reclaim ->pool_lock#2 ->lock ->&root->kernfs_rwsem ->&k->k_lock ->uevent_sock_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->running_helpers_waitq.lock ->(console_sem).lock ->&rq->__lock FD: 1 BD: 3 +.+.: mon_lock FD: 163 BD: 2 +.+.: usb_port_peer_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->dev_pm_qos_mtx ->component_mutex ->device_links_srcu ->dev_pm_qos_sysfs_mtx ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->sysfs_symlink_target_lock FD: 1 BD: 2 ....: device_state_lock FD: 31 BD: 6 ....: hcd_root_hub_lock ->hcd_urb_list_lock ->&bh->lock ->&p->pi_lock FD: 1 BD: 7 ....: hcd_urb_list_lock FD: 1 BD: 7 ..-.: &bh->lock FD: 4 BD: 93 ..-.: lock#6 ->kcov_remote_lock ->&kcov->lock FD: 2 BD: 156 ..-.: kcov_remote_lock ->pool_lock#2 FD: 29 BD: 6 ..-.: &x->wait#19 ->&p->pi_lock FD: 1 BD: 2 +.+.: set_config_lock FD: 134 BD: 2 +.+.: hcd->bandwidth_mutex ->devtree_lock ->&obj_hash[i].lock ->&x->wait#9 ->pool_lock#2 ->&dev->power.lock ->fs_reclaim ->mmu_notifier_invalidate_range_start ->hcd_root_hub_lock ->&rq->__lock ->&x->wait#19 ->&zone->lock ->&____s->seqcount ->&c->lock FD: 1 BD: 2 +.+.: &new_driver->dynids.lock FD: 1 BD: 5 ....: &dum_hcd->dum->lock FD: 135 BD: 4 +.+.: &hub->status_mutex ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->hcd_root_hub_lock ->fs_reclaim ->&dum_hcd->dum->lock ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#19 ->&base->lock ->&pool->lock ->(&timer.timer) ->&vhci_hcd->vhci->lock ->&zone->lock ->&____s->seqcount ->&c->lock FD: 1 BD: 3 +.+.: component_mutex FD: 137 BD: 2 +.+.: (work_completion)(&(&hub->init_work)->work) FD: 1 BD: 2 +.+.: subsys mutex#61 FD: 38 BD: 1 +.+.: (wq_completion)usb_hub_wq ->(work_completion)(&hub->events) FD: 37 BD: 2 +.+.: (work_completion)(&hub->events) ->lock#6 ->&dev->power.lock FD: 1 BD: 2 ....: &hub->irq_urb_lock FD: 1 BD: 2 ....: (&hub->irq_urb_retry) FD: 1 BD: 2 ....: hcd_urb_unlink_lock FD: 1 BD: 1 ..-.: usb_kill_urb_queue.lock FD: 1 BD: 2 +.+.: (work_completion)(&hub->tt.clear_work) FD: 31 BD: 1 ..-.: lib/debugobjects.c:101 FD: 31 BD: 2 +.+.: (debug_obj_work).work ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 8 +.+.: udc_lock FD: 3 BD: 1 +.+.: subsys mutex#62 ->&k->k_lock FD: 1 BD: 1 ....: gadget_id_numbers.xa_lock FD: 134 BD: 2 +.+.: (work_completion)(&gadget->work) ->&root->kernfs_rwsem ->kernfs_notify_lock FD: 31 BD: 165 ....: kernfs_notify_lock FD: 66 BD: 2 +.+.: kernfs_notify_work ->kernfs_notify_lock ->&root->kernfs_supers_rwsem FD: 64 BD: 7 ++++: &root->kernfs_supers_rwsem ->inode_hash_lock FD: 1 BD: 1 +.+.: subsys mutex#63 FD: 1 
BD: 1 +.+.: func_lock FD: 1 BD: 1 +.+.: g_tf_lock FD: 1 BD: 5 ....: &vhci_hcd->vhci->lock FD: 31 BD: 1 ..-.: net/core/link_watch.c:31 FD: 41 BD: 7 -.-.: i8042_lock ->(console_sem).lock ->&x->wait#20 FD: 29 BD: 8 -...: &x->wait#20 ->&p->pi_lock FD: 1 BD: 162 ....: irq_resend_lock FD: 1 BD: 85 +.+.: &ent->pde_unload_lock FD: 31 BD: 4 ....: serio_event_lock ->pool_lock#2 FD: 246 BD: 1 +.+.: (wq_completion)events_long ->serio_event_work ->(work_completion)(&(&ipvs->defense_work)->work) ->(work_completion)(&(&br->gc_work)->work) ->&rq->__lock ->(work_completion)(&(&ipvs->est_reload_work)->work) ->(work_completion)(&br->mcast_gc_work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 2 +.+.: serio_event_work ->serio_mutex FD: 219 BD: 3 +.+.: serio_mutex ->serio_event_lock ->i8042_lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&device->physical_node_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->semaphore->lock ->&obj_hash[i].lock ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#64 ->bus_type_sem FD: 1 BD: 4 +.+.: subsys mutex#64 FD: 2 BD: 7 ....: input_ida.xa_lock ->pool_lock#2 FD: 46 BD: 7 +.+.: &mousedev->mutex/1 ->&mousedev->mutex#2 FD: 206 BD: 4 +.+.: &serio->drv_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&serio->lock ->i8042_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->input_mutex ->i8042_lock ->quarantine_lock ->psmouse_mutex FD: 36 BD: 7 -.-.: &serio->lock ->&ps2dev->wait ->&dev->power.lock ->&dev->event_lock#2 FD: 46 BD: 6 +.+.: i8042_mutex ->&serio->lock ->i8042_lock ->&ps2dev->wait ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->&cfs_rq->removed.lock ->pool_lock#2 FD: 29 BD: 8 -.-.: &ps2dev->wait ->&p->pi_lock FD: 1 BD: 1 ....: rtc_ida.xa_lock FD: 2 BD: 1 +.+.: &rtc->ops_lock ->rtc_lock FD: 1 BD: 2 ....: platform_devid_ida.xa_lock FD: 1 BD: 2 ....: rtcdev_lock FD: 128 BD: 1 +.+.: g_smscore_deviceslock ->fs_reclaim ->pool_lock#2 FD: 1 BD: 1 +.+.: cx231xx_devlist_mutex FD: 1 BD: 1 +.+.: em28xx_devlist_mutex FD: 174 BD: 7 +.+.: &led_cdev->led_access ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#65 ->leds_list_lock ->triggers_list_lock FD: 3 BD: 8 +.+.: subsys mutex#65 ->&k->k_lock FD: 142 BD: 20 +.+.: &led_cdev->trigger_lock ->fs_reclaim ->pool_lock#2 ->&trig->leddev_list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->&obj_hash[i].lock ->running_helpers_waitq.lock FD: 1 BD: 21 +.+.: &trig->leddev_list_lock FD: 1 BD: 23 -...: &dev->event_lock#2 FD: 205 BD: 5 +.+.: psmouse_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&serio->lock ->i8042_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->running_helpers_waitq.lock ->&rq->__lock ->&k->k_lock ->subsys mutex#30 ->(console_sem).lock ->input_mutex FD: 1 BD: 1 ....: 
pvr2_context_sync_data.lock FD: 1 BD: 15 +.+.: i2c_dev_list_lock FD: 30 BD: 8 +.+.: subsys mutex#66 ->&rq->__lock ->&k->k_lock FD: 1 BD: 1 +.+.: subsys mutex#67 FD: 177 BD: 2 +.+.: dvbdev_register_lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->minor_rwsem ->&xa->xa_lock#11 ->&mdev->graph_mutex ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->&k->k_lock ->subsys mutex#68 FD: 177 BD: 1 +.+.: frontend_mutex ->fs_reclaim ->pool_lock#2 ->(console_sem).lock ->dvbdev_register_lock FD: 1 BD: 3 +.+.: minor_rwsem FD: 2 BD: 3 ....: &xa->xa_lock#11 ->pool_lock#2 FD: 128 BD: 4 +.+.: &mdev->graph_mutex ->fs_reclaim ->pool_lock#2 ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock FD: 3 BD: 3 +.+.: subsys mutex#68 ->&k->k_lock FD: 1 BD: 1 ....: &dmxdev->lock FD: 1 BD: 1 +.+.: &dvbdemux->mutex FD: 1 BD: 1 +.+.: media_devnode_lock FD: 1 BD: 1 +.+.: subsys mutex#69 FD: 28 BD: 1 +.+.: videodev_lock ->&rq->__lock FD: 3 BD: 1 +.+.: subsys mutex#70 ->&k->k_lock FD: 1 BD: 1 +.+.: vimc_sensor:393:(&vsensor->hdl)->_lock FD: 1 BD: 1 +.+.: &v4l2_dev->lock FD: 1 BD: 1 +.+.: vimc_debayer:578:(&vdebayer->hdl)->_lock FD: 1 BD: 1 +.+.: vimc_lens:61:(&vlens->hdl)->_lock FD: 138 BD: 1 +.+.: vivid_ctrls:1606:(hdl_user_gen)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->&zone->lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&obj_hash[i].lock FD: 129 BD: 1 +.+.: vivid_ctrls:1608:(hdl_user_vid)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&zone->lock FD: 132 BD: 1 +.+.: vivid_ctrls:1610:(hdl_user_aud)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1630:(hdl_radio_rx)->_lock ->vivid_ctrls:1632:(hdl_radio_tx)->_lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 136 BD: 1 +.+.: vivid_ctrls:1612:(hdl_streaming)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1622:(hdl_vid_out)->_lock ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->vivid_ctrls:1627:(hdl_vbi_out)->_lock ->vivid_ctrls:1634:(hdl_sdr_cap)->_lock ->vivid_ctrls:1636:(hdl_meta_cap)->_lock ->vivid_ctrls:1638:(hdl_meta_out)->_lock ->vivid_ctrls:1640:(hdl_tch_cap)->_lock ->&c->lock ->&____s->seqcount ->&zone->lock FD: 130 BD: 1 +.+.: vivid_ctrls:1614:(hdl_sdtv_cap)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->&c->lock ->&____s->seqcount FD: 130 BD: 1 +.+.: vivid_ctrls:1616:(hdl_loop_cap)->_lock ->vivid_ctrls:1620:(hdl_vid_cap)->_lock ->fs_reclaim ->pool_lock#2 ->vivid_ctrls:1625:(hdl_vbi_cap)->_lock ->&c->lock ->&____s->seqcount FD: 1 BD: 1 +.+.: vivid_ctrls:1618:(hdl_fb)->_lock FD: 1 BD: 7 +.+.: vivid_ctrls:1620:(hdl_vid_cap)->_lock FD: 1 BD: 4 +.+.: vivid_ctrls:1622:(hdl_vid_out)->_lock FD: 1 BD: 5 +.+.: vivid_ctrls:1625:(hdl_vbi_cap)->_lock FD: 1 BD: 3 +.+.: 
vivid_ctrls:1627:(hdl_vbi_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1630:(hdl_radio_rx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1632:(hdl_radio_tx)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1634:(hdl_sdr_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1636:(hdl_meta_cap)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1638:(hdl_meta_out)->_lock FD: 1 BD: 3 +.+.: vivid_ctrls:1640:(hdl_tch_cap)->_lock FD: 1 BD: 1 ....: &adap->kthread_waitq FD: 1 BD: 1 +.+.: cec_devnode_lock FD: 1 BD: 1 +.+.: &dev->cec_xfers_slock FD: 1 BD: 1 ....: &dev->kthread_waitq_cec FD: 1 BD: 1 +.+.: subsys mutex#71 FD: 6 BD: 1 +.+.: &adap->lock ->tk_core.seq.seqcount ->&adap->devnode.lock_fhs FD: 1 BD: 2 +.+.: &adap->devnode.lock_fhs FD: 1 BD: 1 ....: ptp_clocks_map.xa_lock FD: 3 BD: 1 +.+.: subsys mutex#72 ->&k->k_lock FD: 1 BD: 1 +.+.: pers_lock FD: 1 BD: 1 +.+.: _lock FD: 1 BD: 3 +.+.: dm_bufio_clients_lock FD: 1 BD: 1 +.+.: _ps_lock FD: 1 BD: 1 +.+.: _lock#2 FD: 1 BD: 1 +.+.: _lock#3 FD: 1 BD: 1 +.+.: register_lock#2 FD: 3 BD: 1 +.+.: subsys mutex#73 ->&k->k_lock FD: 1 BD: 1 .+.+: bp_lock FD: 3 BD: 1 +.+.: subsys mutex#74 ->&k->k_lock FD: 135 BD: 74 +.+.: lock#7 ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#13 ->&rq->__lock ->crngs.lock ->&xa->xa_lock#18 ->&id_priv->qp_mutex ->&id_priv->lock ->&xa->xa_lock#19 ->&cm_id_priv->lock FD: 15 BD: 1 +.-.: (&dsp_spl_tl) ->dsp_lock FD: 14 BD: 2 ..-.: dsp_lock ->iclock_lock ->&obj_hash[i].lock ->&base->lock FD: 5 BD: 3 ...-: iclock_lock ->tk_core.seq.seqcount FD: 1 BD: 1 +.+.: intf_mutex FD: 1 BD: 1 ....: iscsi_transport_lock FD: 3 BD: 1 +.+.: subsys mutex#75 ->&k->k_lock FD: 1 BD: 1 ....: &tx_task->waiting FD: 908 BD: 2 ++++: link_ops_rwsem ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->(console_sem).lock ->&c->lock ->&pdata->netdev_lock ->ndev_hash_lock ->crypto_alg_sem ->devices_rwsem ->&rxe->usdev_lock ->rtnl_mutex ->&device->cache_lock ->rdmacg_mutex ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->&rq->__lock ->dpm_list_mtx ->subsys mutex#84 ->&zone->lock ->&____s->seqcount ->&sem->wait_lock ->&p->pi_lock ->&n->list_lock ->rcu_node_0 ->&____s->seqcount#2 ->uevent_sock_mutex ->devices_rwsem.wait_lock ->rtnl_mutex.wait_lock ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->remove_cache_srcu ->console_owner_lock ->console_owner ->&xa->xa_lock#20 ->&xa->xa_lock#18 ->krc.lock ->&xa->xa_lock#16 FD: 3 BD: 1 +.+.: subsys mutex#76 ->&k->k_lock FD: 1 BD: 1 +.+.: service_lock FD: 1 BD: 1 +.+.: vsock_register_mutex FD: 1 BD: 1 +.+.: comedi_drivers_list_lock FD: 3 BD: 6 +.+.: subsys mutex#77 ->&k->k_lock FD: 158 BD: 2 ++++: snd_ctl_layer_rwsem ->snd_ctl_led_mutex ->fs_reclaim ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->lock ->&root->kernfs_rwsem ->bus_type_sem ->&dev->power.lock ->dpm_list_mtx ->&k->k_lock ->sysfs_symlink_target_lock FD: 1 BD: 3 +.+.: snd_card_mutex FD: 1 BD: 1 +.+.: snd_ioctl_rwsem FD: 128 BD: 2 +.+.: strings ->fs_reclaim ->pool_lock#2 FD: 1 BD: 2 +.+.: register_mutex FD: 173 BD: 3 +.+.: sound_mutex ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&x->wait#11 ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#77 ->&k->k_lock FD: 182 BD: 1 +.+.: register_mutex#2 
->fs_reclaim ->pool_lock#2 ->sound_mutex ->&obj_hash[i].lock ->&rq->__lock ->register_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->sound_oss_mutex ->strings ->&entry->access ->info_mutex FD: 174 BD: 1 +.+.: register_mutex#3 ->fs_reclaim ->pool_lock#2 ->sound_mutex ->clients_lock ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 5 ....: clients_lock FD: 2 BD: 1 +.+.: &client->ports_mutex ->&client->ports_lock FD: 1 BD: 5 .+.+: &client->ports_lock FD: 175 BD: 1 +.+.: register_mutex#4 ->fs_reclaim ->pool_lock#2 ->sound_oss_mutex FD: 175 BD: 3 +.+.: sound_oss_mutex ->fs_reclaim ->pool_lock#2 ->sound_loader_lock ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&p->pi_lock ->&rq->__lock ->&x->wait#11 ->&c->lock ->&zone->lock ->&____s->seqcount ->uevent_sock_mutex ->running_helpers_waitq.lock ->subsys mutex#77 ->&k->k_lock FD: 1 BD: 4 +.+.: sound_loader_lock FD: 131 BD: 1 .+.+: &grp->list_mutex/1 ->clients_lock ->&client->ports_lock ->register_lock#3 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 2 BD: 1 +.+.: &grp->list_mutex#2 ->&grp->list_lock FD: 1 BD: 2 ....: &grp->list_lock FD: 138 BD: 2 +.+.: async_lookup_work ->fs_reclaim ->pool_lock#2 ->clients_lock ->&client->ports_lock ->snd_card_mutex ->(kmod_concurrent_max).lock ->&obj_hash[i].lock ->&x->wait#17 ->&pool->lock ->&rq->__lock ->running_helpers_waitq.lock ->autoload_work ->&x->wait#10 FD: 1 BD: 2 ....: register_lock#3 FD: 160 BD: 1 ++++: &card->controls_rwsem ->&xa->xa_lock#12 ->fs_reclaim ->&card->ctl_files_rwlock ->snd_ctl_layer_rwsem ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 9 BD: 2 +.+.: &xa->xa_lock#12 ->pool_lock#2 ->&c->lock ->&zone->lock ->&____s->seqcount FD: 1 BD: 2 ....: &card->ctl_files_rwlock FD: 4 BD: 3 +.+.: autoload_work ->&k->list_lock ->&k->k_lock FD: 1 BD: 3 +.+.: snd_ctl_led_mutex FD: 1 BD: 1 +.+.: register_mutex#5 FD: 33 BD: 1 ..-.: drivers/block/floppy.c:640 FD: 38 BD: 1 +.+.: (fd_timeout).work ->&obj_hash[i].lock ->floppy_work ->dma_spin_lock ->floppy_lock ->command_done.lock FD: 1 BD: 75 +.+.: failover_lock FD: 10 BD: 5 +...: llc_sap_list_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&____s->seqcount#2 FD: 128 BD: 1 +.+.: act_id_mutex ->fs_reclaim ->pool_lock#2 ->&rq->__lock FD: 1 BD: 72 ++++: act_mod_lock FD: 1 BD: 3 ....: fdc_wait.lock FD: 1 BD: 1 +.+.: ife_mod_lock FD: 1 BD: 72 +.+.: nf_connlabels_lock FD: 1 BD: 3 ....: (&motor_off_timer[drive]) FD: 1 BD: 311 ....: (&sq->pending_timer) FD: 1 BD: 3 +.+.: (work_completion)(&td->dispatch_work) FD: 36 BD: 5 +.+.: &q->blkcg_mutex ->&q->queue_lock ->&rq->__lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 ++++: cls_mod_lock FD: 1 BD: 72 ++++: ematch_mod_lock FD: 152 BD: 2 +.+.: sock_diag_table_mutex ->inet_diag_table_mutex ->nlk_cb_mutex-SOCK_DIAG ->rlock-AF_NETLINK FD: 132 BD: 1 +.+.: nfnl_subsys_acct ->nlk_cb_mutex-NETFILTER FD: 1 BD: 1 +.+.: nfnl_subsys_queue FD: 33 BD: 1 +.+.: nfnl_subsys_ulog ->&log->instances_lock ->&inst->lock ->&rq->__lock FD: 1 BD: 5 +.+.: nf_log_mutex FD: 1 BD: 1 +.+.: nfnl_subsys_osf FD: 37 BD: 5 +.+.: nf_sockopt_mutex ->&rq->__lock ->nf_sockopt_mutex.wait_lock ->pgd_lock ->stock_lock ->pool_lock#2 ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 139 BD: 4 +.+.: nfnl_subsys_ctnetlink ->nf_conntrack_mutex ->nlk_cb_mutex-NETFILTER ->pool_lock#2 ->&rq->__lock ->(console_sem).lock 
->&obj_hash[i].lock FD: 2 BD: 1 +.+.: nfnl_subsys_ctnetlink_exp ->nf_conntrack_expect_lock FD: 1 BD: 5 +.+.: nf_ct_ecache_mutex FD: 128 BD: 1 +.+.: nfnl_subsys_cttimeout ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 +.+.: nfnl_subsys_cthelper FD: 1 BD: 1 +.+.: nf_ct_helper_mutex FD: 1 BD: 2 +...: nf_conntrack_expect_lock FD: 39 BD: 2 +.+.: (work_completion)(&blkg->free_work) ->&q->blkcg_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&xa->xa_lock#10 ->pcpu_lock ->blk_queue_ida.xa_lock ->percpu_ref_switch_lock FD: 36 BD: 7 +.+.: nf_conntrack_mutex ->&nf_conntrack_locks[i] ->&rq->__lock ->&____s->seqcount#7 ->&nf_conntrack_locks[i]/1 ->&obj_hash[i].lock ->pool_lock#2 ->&c->lock ->&cfs_rq->removed.lock ->nf_conntrack_mutex.wait_lock ->&pool->lock FD: 1 BD: 1 +.+.: nf_ct_nat_helpers_mutex FD: 298 BD: 4 +.+.: nfnl_subsys_nftables ->&nft_net->commit_mutex FD: 1 BD: 1 +.+.: nfnl_subsys_nftcompat FD: 1019 BD: 1 +.+.: masq_mutex ->pernet_ops_rwsem ->(inetaddr_chain).rwsem ->inet6addr_chain.lock FD: 215 BD: 5 +.+.: &xt[i].mutex ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->&per_cpu(xt_recseq, i) ->&obj_hash[i].lock ->purge_vmap_area_lock ->&c->lock ->&n->list_lock ->init_mm.page_table_lock ->&____s->seqcount#2 ->&rq->__lock ->remove_cache_srcu ->rcu_node_0 ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&zone->lock ->&meta->lock ->quarantine_lock ->&base->lock FD: 31 BD: 3780 +.+.: &tn->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 3 BD: 1 +.+.: subsys mutex#78 ->&k->k_lock FD: 140 BD: 5 +.+.: nfnl_subsys_ipset ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->stock_lock ->crngs.lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&rq->__lock ->&base->lock ->rcu_state.barrier_mutex ->ip_set_ref_lock ->(work_completion)(&(&gc->dwork)->work) ->nlk_cb_mutex-NETFILTER ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->remove_cache_srcu FD: 1 BD: 1 +.+.: ip_set_type_mutex FD: 137 BD: 81 +.+.: ipvs->est_mutex ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->&c->lock ->&n->list_lock ->pcpu_lock ->&obj_hash[i].lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->pcpu_alloc_mutex.wait_lock ->&p->pi_lock ->kthread_create_lock ->&x->wait ->&pool->lock ->(console_sem).lock FD: 1 BD: 79 +.+.: ip_vs_sched_mutex FD: 128 BD: 5 +.+.: __ip_vs_app_mutex ->fs_reclaim ->&c->lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&n->list_lock ->&rq->__lock ->&obj_hash[i].lock ->&____s->seqcount#2 FD: 1 BD: 1 +.+.: ip_vs_pe_mutex FD: 1 BD: 1 +.+.: tunnel4_mutex FD: 1 BD: 1 +.+.: xfrm4_protocol_mutex FD: 30 BD: 4 +.+.: inet_diag_table_mutex ->&rq->__lock ->&h->lhash2[i].lock ->&hashinfo->ehash_locks[i] ->&cfs_rq->removed.lock FD: 1 BD: 1 +...: xfrm_km_lock FD: 1 BD: 1 +...: xfrm_translator_lock FD: 1 BD: 1 +.+.: xfrm6_protocol_mutex FD: 1 BD: 1 +.+.: tunnel6_mutex FD: 1 BD: 1 +.+.: xfrm_if_cb_lock FD: 1 BD: 1 +...: inetsw6_lock FD: 1 BD: 7 +.+.: &hashinfo->lock#2 FD: 19 BD: 5 +.+.: &net->ipv6.ip6addrlbl_table.lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock FD: 209 BD: 3715 +.+.: &idev->mc_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&dev_addr_list_lock_key ->_xmit_ETHER ->&zone->lock ->batched_entropy_u32.lock ->&base->lock ->&n->list_lock ->remove_cache_srcu ->krc.lock 
->batched_entropy_u8.lock ->kfence_freelist_lock ->&bridge_netdev_addr_lock_key ->&dev_addr_list_lock_key#2 ->&batadv_netdev_addr_lock_key ->&rq->__lock ->&vlan_netdev_addr_lock_key ->&macvlan_netdev_addr_lock_key ->&dev_addr_list_lock_key#3 ->&bridge_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key/1 ->rcu_node_0 ->&dev_addr_list_lock_key#2/1 ->&pool->lock ->_xmit_ETHER/1 ->&batadv_netdev_addr_lock_key/1 ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/1 ->&dev_addr_list_lock_key#3/1 ->&macsec_netdev_addr_lock_key/1 ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->_xmit_IPGRE ->_xmit_ETHER/2 ->&dev_addr_list_lock_key#3/2 ->&macvlan_netdev_addr_lock_key/2 ->&cfs_rq->removed.lock ->&lock->wait_lock FD: 19 BD: 3716 +...: &dev_addr_list_lock_key ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 44 BD: 3731 +...: _xmit_ETHER ->&local->filter_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&____s->seqcount#2 ->krc.lock ->&n->list_lock FD: 873 BD: 1 +.+.: (wq_completion)ipv6_addrconf ->(work_completion)(&(&net->ipv6.addr_chk_work)->work) ->(work_completion)(&(&ifa->dad_work)->work) ->&rq->__lock FD: 871 BD: 6 +.+.: (work_completion)(&(&net->ipv6.addr_chk_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 29 BD: 79 ....: &x->wait#21 ->&p->pi_lock FD: 59 BD: 3874 ++--: &ndev->lock ->&ifa->lock ->pool_lock#2 ->&dir->lock#2 ->pcpu_lock ->&obj_hash[i].lock ->&tb->tb6_lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->batched_entropy_u32.lock ->&base->lock ->&____s->seqcount#2 FD: 11 BD: 1 +.+.: stp_proto_mutex ->llc_sap_list_lock FD: 1 BD: 1 ....: switchdev_notif_chain.lock FD: 28 BD: 72 ++++: (switchdev_blocking_notif_chain).rwsem ->&rq->__lock FD: 871 BD: 1 +.+.: br_ioctl_mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 287 BD: 75 +.+.: nf_ct_proto_mutex ->defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->defrag6_mutex ->&rq->__lock FD: 215 BD: 5 +.+.: ebt_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock ->&mm->mmap_lock ->&table->lock#3 ->ebt_mutex.wait_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 1 BD: 1 +.+.: dsa_tag_drivers_lock FD: 1 BD: 1 +...: protocol_list_lock FD: 1 BD: 1 +...: linkfail_lock FD: 1 BD: 1 +...: rose_neigh_list_lock FD: 1 BD: 1 +.+.: proto_tab_lock#2 FD: 1 BD: 29 ++++: chan_list_lock FD: 1 BD: 4 +.+.: l2cap_sk_list.lock FD: 220 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->chan_list_lock ->&ei->socket.wq.wait ->&mm->mmap_lock ->&rq->__lock FD: 1 BD: 30 +...: slock-AF_BLUETOOTH-BTPROTO_L2CAP FD: 1 BD: 1 ....: rfcomm_wq.lock FD: 1 BD: 3 +.+.: rfcomm_mutex FD: 1 BD: 1 +.+.: auth_domain_lock FD: 1 BD: 1 +.+.: registered_mechs_lock FD: 1 BD: 1 ....: atm_dev_notify_chain.lock FD: 1 BD: 1 +.+.: proto_tab_lock#3 FD: 873 BD: 1 +.+.: vlan_ioctl_mutex ->&mm->mmap_lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->vlan_ioctl_mutex.wait_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->pcpu_lock ->pool_lock#2 ->&dir->lock#2 ->&obj_hash[i].lock ->krc.lock ->netdev_unregistering_wq.lock ->rcu_node_0 ->&cfs_rq->removed.lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&sem->wait_lock FD: 1 BD: 1 +.+.: rds_info_lock FD: 132 BD: 8 ++++: rds_trans_sem ->(console_sem).lock ->fs_reclaim 
->pool_lock#2 ->crngs.lock ->&id_priv->handler_mutex ->id_table_lock ->&x->wait#28 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 80 ....: &id_priv->lock FD: 2 BD: 75 +.+.: &xa->xa_lock#13 ->pool_lock#2 FD: 404 BD: 86 +.+.: k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&table->hash[i].lock ->k-clock-AF_INET6 ->&queue->rskq_lock ->&obj_hash[i].lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->&rq->__lock ->fs_reclaim ->rcu_node_0 ->&n->list_lock ->&base->lock ->slock-AF_INET6 ->cpu_hotplug_lock ->(console_sem).lock ->clock-AF_INET6 ->&dir->lock ->&hashinfo->ehash_locks[i] ->&____s->seqcount ->remove_cache_srcu ->elock-AF_INET6 ->&____s->seqcount#2 ->&idev->mc_lock ->crngs.lock ->&token_hash[i].lock FD: 117 BD: 91 +.-.: k-slock-AF_INET6 ->pool_lock#2 ->tk_core.seq.seqcount ->&c->lock ->&obj_hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET6 ->&hashinfo->ehash_locks[i] ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->k-clock-AF_INET6 ->krc.lock ->&____s->seqcount ->batched_entropy_u16.lock ->clock-AF_INET6 ->crngs.lock ->&(&bp->lock)->lock FD: 33 BD: 127 ++.-: k-clock-AF_INET6 FD: 26 BD: 119 +.-.: &tcp_hashinfo.bhash[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&tcp_hashinfo.bhash2[i].lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->stock_lock ->&obj_hash[i].lock ->k-clock-AF_INET ->quarantine_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&meta->lock ->kfence_freelist_lock ->&n->list_lock ->batched_entropy_u8.lock FD: 24 BD: 120 +.-.: &tcp_hashinfo.bhash2[i].lock ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->k-clock-AF_INET6 ->clock-AF_INET ->clock-AF_INET6 ->&obj_hash[i].lock ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] ->stock_lock ->kfence_freelist_lock ->&meta->lock ->k-clock-AF_INET ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&n->list_lock FD: 9 BD: 95 +.+.: &h->lhash2[i].lock ->clock-AF_INET6 ->reuseport_lock ->k-clock-AF_INET6 FD: 1 BD: 5 +...: &list->lock#4 FD: 41 BD: 80 ++..: k-clock-AF_TIPC ->&con->sub_lock FD: 149 BD: 76 +.+.: k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&tn->nametbl_lock ->&obj_hash[i].lock ->k-clock-AF_TIPC ->fs_reclaim ->pool_lock#2 ->&dir->lock ->batched_entropy_u32.lock ->k-sk_lock-AF_TIPC/1 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&list->lock#23 ->&base->lock ->rcu_node_0 ->&rq->__lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->&cfs_rq->removed.lock FD: 29 BD: 78 +...: k-slock-AF_TIPC ->&list->lock#19 ->&obj_hash[i].lock ->pool_lock#2 ->&list->lock#23 ->k-clock-AF_TIPC ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 39 BD: 82 +...: &tn->nametbl_lock ->pool_lock#2 ->&service->lock ->&c->lock ->&____s->seqcount ->&nt->cluster_scope_lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&n->list_lock FD: 37 BD: 83 +...: &service->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&sub->lock ->&____s->seqcount ->&n->list_lock FD: 43 BD: 77 +.+.: &pnettable->lock ->&c->lock ->&n->list_lock ->&rq->__lock ->pool_lock#2 ->&dir->lock#2 ->(console_sem).lock ->&obj_hash[i].lock FD: 28 BD: 77 +.+.: smc_ib_devices.mutex ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 1 BD: 1 +.+.: smc_wr_rx_hash_lock FD: 1 BD: 1 +.+.: v9fs_trans_lock FD: 1 BD: 5 +...: &this->receive_lock FD: 1 BD: 1 +...: lowpan_nhc_lock FD: 873 BD: 9 +.+.: ovs_mutex ->(work_completion)(&data->gc_work) ->nf_ct_proto_mutex 
->&obj_hash[i].lock ->&____s->seqcount ->pool_lock#2 ->nf_connlabels_lock ->net_rwsem ->&rq->__lock ->quarantine_lock ->krc.lock ->&c->lock ->fs_reclaim ->pcpu_alloc_mutex ->stock_lock ->stack_depot_init_mutex ->crngs.lock ->rtnl_mutex ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->remove_cache_srcu ->ovs_mutex.wait_lock ->&pool->lock ->&____s->seqcount#2 ->&n->list_lock FD: 285 BD: 76 +.+.: defrag4_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 285 BD: 76 +.+.: defrag6_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock FD: 1 BD: 146 +.+.: subsys mutex#79 FD: 38 BD: 2 +.+.: drain_vmap_work ->vmap_purge_lock FD: 31 BD: 1 ..-.: &(&gc_work->dwork)->timer FD: 40 BD: 2 +.+.: (work_completion)(&(&gc_work->dwork)->work) ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&base->lock ->&rcu_state.expedited_wq FD: 1 BD: 3795 ...-: &____s->seqcount#7 FD: 31 BD: 1 ..-.: &(&ipvs->defense_work)->timer FD: 32 BD: 6 +.+.: (work_completion)(&(&ipvs->defense_work)->work) ->&s->s_inode_list_lock ->&ipvs->dropentry_lock ->&ipvs->droppacket_lock ->&ipvs->securetcp_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 7 +...: &ipvs->dropentry_lock FD: 1 BD: 7 +...: &ipvs->droppacket_lock FD: 1 BD: 7 +...: &ipvs->securetcp_lock FD: 9 BD: 248 +...: map_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 1 ....: rcu_read_lock_sched FD: 12 BD: 248 +.-.: prog_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 78 +.-.: bpf_lock FD: 1 BD: 1 ....: rcu_read_lock_trace FD: 9 BD: 248 +...: btf_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 131 BD: 147 +.+.: &map->freeze_mutex ->&vma->vm_lock->lock ->vmap_area_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->ptlock_ptr(page) ->ptlock_ptr(page)#2 FD: 1 BD: 6 +.+.: ima_keys_lock FD: 131 BD: 145 +.+.: scomp_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&zone->lock FD: 13 BD: 5 +.-.: (&net->can.stattimer) ->&obj_hash[i].lock ->&base->lock FD: 21 BD: 1 +.-.: (&vblank->disable_timer) ->&dev->vbl_lock FD: 29 BD: 1 +.+.: pcpu_drain_mutex ->&pcp->lock ->&rq->__lock FD: 487 BD: 5 +.+.: k-sk_lock-AF_RXRPC ->k-slock-AF_RXRPC ->&rxnet->local_mutex ->&local->services_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&rx->incoming_lock ->&obj_hash[i].lock ->&rxnet->conn_lock ->&call->waitq ->(rxrpc_call_limiter).lock ->&rx->recvmsg_lock ->&rx->call_lock ->&rxnet->call_lock ->(&call->timer) ->&base->lock ->&list->lock#21 ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->remove_cache_srcu FD: 1 BD: 6 +...: k-slock-AF_RXRPC FD: 474 BD: 7 +.+.: &rxnet->local_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->crngs.lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->cpu_hotplug_lock ->&rq->__lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&x->wait#22 ->stock_lock ->&n->list_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&table->hash[i].lock ->k-clock-AF_INET6 ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->k-sk_lock-AF_INET ->k-slock-AF_INET ->remove_cache_srcu FD: 16 BD: 93 +...: &table->hash[i].lock ->k-clock-AF_INET6 ->&table->hash2[i].lock ->k-clock-AF_INET ->clock-AF_INET ->clock-AF_INET6 ->reuseport_lock FD: 1 BD: 94 +...: 
&table->hash2[i].lock FD: 284 BD: 2 +.+.: netstamp_work ->cpu_hotplug_lock FD: 29 BD: 8 ....: &x->wait#22 ->&p->pi_lock FD: 1 BD: 9 +.+.: &local->services_lock FD: 1 BD: 10 +.+.: &rxnet->conn_lock FD: 29 BD: 8 ....: &call->waitq ->&p->pi_lock FD: 1 BD: 10 ++++: &rx->call_lock FD: 1 BD: 10 +.+.: &rxnet->call_lock FD: 33 BD: 5 +.-.: (&rxnet->peer_keepalive_timer) FD: 136 BD: 1 +.+.: init_user_ns.keyring_sem ->key_user_lock ->root_key_user.lock ->fs_reclaim ->pool_lock#2 ->crngs.lock ->key_serial_lock ->key_construction_mutex ->&type->lock_class ->keyring_serialise_link_lock FD: 1 BD: 5 +.+.: root_key_user.lock FD: 17 BD: 7 +.+.: (wq_completion)krxrpcd ->(work_completion)(&rxnet->peer_keepalive_work) ->(work_completion)(&rxnet->service_conn_reaper) FD: 14 BD: 8 +.+.: (work_completion)(&rxnet->peer_keepalive_work) ->&rxnet->peer_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 6 +.+.: keyring_name_lock FD: 1 BD: 11 +.+.: &rxnet->peer_hash_lock FD: 1 BD: 1 +.+.: template_list FD: 1 BD: 1 +.+.: idr_lock FD: 129 BD: 10 +.+.: ima_extend_list_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->remove_cache_srcu FD: 1 BD: 1 +.+.: clk_debug_lock FD: 29 BD: 4 +.+.: deferred_probe_work ->deferred_probe_mutex FD: 131 BD: 72 ++++: &(&net->nexthop.notifier_chain)->rwsem ->&data->nh_lock FD: 285 BD: 84 +.+.: k-sk_lock-AF_INET ->k-slock-AF_INET ->&table->hash[i].lock ->&obj_hash[i].lock ->k-clock-AF_INET ->fs_reclaim ->&c->lock ->pool_lock#2 ->crngs.lock ->&token_hash[i].lock ->&rq->__lock ->&____s->seqcount#8 ->&tcp_hashinfo.bhash[i].lock ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->batched_entropy_u16.lock ->&base->lock ->slock-AF_INET ->&____s->seqcount ->&hashinfo->ehash_locks[i] ->&ei->socket.wq.wait ->&mm->mmap_lock ->&n->list_lock ->stock_lock ->&____s->seqcount#2 FD: 86 BD: 88 +.-.: k-slock-AF_INET ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&list->lock#5 ->&c->lock ->&n->list_lock ->elock-AF_INET ->&dir->lock#2 ->krc.lock ->&____s->seqcount#2 ->&____s->seqcount ->&base->lock ->&tcp_hashinfo.bhash[i].lock ->&(&bp->lock)->lock FD: 34 BD: 122 ++.-: k-clock-AF_INET FD: 871 BD: 2 +.+.: reg_work ->rtnl_mutex FD: 1 BD: 72 +...: reg_pending_beacons_lock FD: 884 BD: 2 +.+.: (work_completion)(&fw_work->work) ->fs_reclaim ->pool_lock#2 ->&fw_cache.lock ->tk_core.seq.seqcount ->async_lock ->init_task.alloc_lock ->&dentry->d_lock ->&sb->s_type->i_mutex_key ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&zone->lock ->&____s->seqcount ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->umhelper_sem ->fw_lock ->rtnl_mutex FD: 2 BD: 4 +.+.: &fw_cache.lock ->pool_lock#2 FD: 1 BD: 1 +.+.: detector_work FD: 1 BD: 1 +.+.: acpi_gpio_deferred_req_irqs_lock FD: 1 BD: 1 +.+.: prepare_lock FD: 31 BD: 1 ..-.: fs/file_table.c:431 FD: 4 BD: 2 +.+.: (delayed_fput_work).work ->&obj_hash[i].lock ->pool_lock#2 FD: 30 BD: 5 +.+.: subsys mutex#80 ->&k->k_lock ->&rq->__lock FD: 2 BD: 12 +.+.: fw_lock ->&x->wait#23 FD: 1 BD: 13 ....: &x->wait#23 FD: 1 BD: 1 +.+.: cdev_lock FD: 344 BD: 2 +.+.: &tty->legacy_mutex ->&tty->read_wait ->&tty->write_wait ->&tty->ldisc_sem ->&tty->files_lock ->&port->lock ->&port->mutex ->&port_lock_key ->tasklist_lock ->&tty->ctrl.lock ->&f->f_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 7 ....: &tty->read_wait FD: 29 BD: 4468 -.-.: &tty->write_wait ->&p->pi_lock FD: 328 BD: 3 ++++: &tty->ldisc_sem ->fs_reclaim ->pool_lock#2 
->&c->lock ->&____s->seqcount ->free_vmap_area_lock ->vmap_area_lock ->&zone->lock ->&tty->write_wait ->&tty->read_wait ->&tty->termios_rwsem ->&mm->mmap_lock ->&port_lock_key ->&port->lock ->&tty->flow.lock ->&ldata->atomic_read_lock FD: 258 BD: 6 ++++: &tty->termios_rwsem ->&port->mutex ->&tty->write_wait ->&tty->read_wait ->&ldata->output_lock ->&port_lock_key FD: 1 BD: 5 +.+.: &tty->files_lock FD: 1 BD: 4468 -.-.: &port->lock FD: 128 BD: 10 +.+.: hash_mutex ->fs_reclaim ->pool_lock#2 FD: 37 BD: 10 -.-.: &i->lock ->&port_lock_key FD: 1 BD: 4489 ....: &wq#2 FD: 1 BD: 5 +.+.: &bdev->bd_holder_lock FD: 386 BD: 1 +.+.: &bdev->bd_fsfreeze_mutex ->sb_lock ->fs_reclaim ->pool_lock#2 ->&type->s_umount_key#25/1 ->&type->s_umount_key#26/1 ->&type->s_umount_key#27/1 ->&type->s_umount_key#28/1 FD: 151 BD: 2 +.+.: &type->s_umount_key#25/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&obj_hash[i].lock ->&wq->mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->wq_pool_mutex ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->bit_wait_table + i ->wq_mayday_lock ->&sbi->old_work_lock ->(work_completion)(&(&sbi->old_work)->work) FD: 29 BD: 4443 ..-.: bit_wait_table + i ->&p->pi_lock FD: 8 BD: 2 +.+.: (work_completion)(&s->destroy_work) ->&rsp->gp_wait ->pcpu_lock ->&obj_hash[i].lock FD: 1 BD: 3 +.+.: &sbi->old_work_lock FD: 1 BD: 3 +.+.: (work_completion)(&(&sbi->old_work)->work) FD: 150 BD: 2 +.+.: &type->s_umount_key#26/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&zone->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->lock#3 FD: 34 BD: 6 +.+.: (work_completion)(work) ->lock#4 ->lock#5 ->&rq->__lock FD: 150 BD: 2 +.+.: &type->s_umount_key#27/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->lock#3 FD: 377 BD: 2 +.+.: &type->s_umount_key#28/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&zone->lock ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->&sb->s_type->i_lock_key#3 ->lock#5 ->&lruvec->lru_lock ->crypto_alg_sem ->percpu_counters_lock ->inode_hash_lock ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_mutex_key#8 ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&journal->j_state_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&journal->j_wait_done_commit ->&p->alloc_lock ->cpu_hotplug_lock ->wq_pool_mutex ->&ei->i_es_lock ->ext4_grpinfo_slab_create_mutex ->&s->s_inode_list_lock ->ext4_li_mtx ->lock ->&root->kernfs_rwsem ->(console_sem).lock ->&dentry->d_lock FD: 35 BD: 168 +.+.: &bgl->locks[i].lock ->&sbi->s_md_lock ->&ei->i_prealloc_lock ->&obj_hash[i].lock ->pool_lock#2 ->&pa->pa_lock ->&lg->lg_prealloc_lock 
->&pa->pa_lock#2 ->&meta->lock ->kfence_freelist_lock FD: 57 BD: 4430 +.+.: &sb->s_type->i_lock_key#22 ->&dentry->d_lock ->&lru->node[i].lock ->&xa->xa_lock#8 ->bit_wait_table + i FD: 237 BD: 8 ++++: &sb->s_type->i_mutex_key#8 ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->integrity_iint_lock ->&rq->__lock ->&sem->wait_lock ->tk_core.seq.seqcount ->&ei->xattr_sem ->fs_reclaim ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&wb->work_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rcu_state.expedited_wq ->quarantine_lock ->mapping.invalidate_lock ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->swap_cgroup_mutex ->&base->lock ->&fq->mq_flush_lock ->&x->wait#26 ->(&timer.timer) ->swapon_mutex ->proc_poll_wait.lock ->&dentry->d_lock ->&n->list_lock ->stock_lock ->&mm->mmap_lock ->&dd->lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&____s->seqcount#2 ->lock#5 ->&p->pi_lock ->&sb->s_type->i_mutex_key#8/4 ->&sbi->s_writepages_rwsem ->&folio_wait_table[i] ->&sem->waiters ->&rsp->gp_wait ->&lruvec->lru_lock ->ima_extend_list_mutex ->&p->alloc_lock ->&list->lock ->kauditd_wait.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->bit_wait_table + i ->pool_lock ->&journal->j_wait_transaction_locked ->&mapping->i_mmap_rwsem ->key#3 ->key#14 ->batched_entropy_u32.lock ->(console_sem).lock FD: 47 BD: 4415 ++++: &ei->i_es_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&sbi->s_es_lock ->&obj_hash[i].lock ->&zone->lock ->key#2 ->key#6 ->key#7 ->key#8 ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&n->list_lock ->&base->lock ->quarantine_lock ->(console_sem).lock FD: 149 BD: 167 ++++: &ei->i_data_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ei->i_es_lock ->&obj_hash[i].lock ->&c->lock ->&zone->lock ->&____s->seqcount ->&ei->i_prealloc_lock ->&n->list_lock ->&sb->s_type->i_lock_key#22 ->&(ei->i_block_reservation_lock) ->&lg->lg_mutex ->&ei->i_raw_lock ->&sbi->s_md_lock ->key#3 ->&pa->pa_lock#2 ->&rq->__lock ->&wb->list_lock ->&mapping->private_lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->key#14 ->remove_cache_srcu ->bit_wait_table + i ->&dd->lock ->rcu_node_0 ->&wb->work_lock ->&____s->seqcount#2 ->&journal->j_state_lock ->&ei->i_data_sem/1 ->&sem->wait_lock ->quarantine_lock ->&cfs_rq->removed.lock ->&journal->j_wait_updates ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rcu_state.expedited_wq ->&bgl->locks[i].lock ->stock_lock ->&xa->xa_lock#8 ->lock#4 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock ->batched_entropy_u32.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->tk_core.seq.seqcount ->(console_sem).lock ->console_owner_lock ->console_owner ->fs_reclaim FD: 1 BD: 4416 +.+.: &sbi->s_es_lock FD: 78 BD: 168 ++++: &journal->j_state_lock ->&journal->j_wait_done_commit ->&journal->j_wait_commit ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&journal->j_wait_updates ->&journal->j_wait_transaction_locked ->&journal->j_list_lock ->&journal->j_wait_reserved FD: 29 BD: 169 ....: &journal->j_wait_done_commit ->&p->pi_lock FD: 29 BD: 169 ....: &journal->j_wait_commit ->&p->pi_lock FD: 158 BD: 3 +.+.: ext4_grpinfo_slab_create_mutex ->slab_mutex 
FD: 132 BD: 4 +.+.: ext4_li_mtx ->fs_reclaim ->pool_lock#2 ->batched_entropy_u16.lock ->&eli->li_list_mtx ->kthread_create_lock ->&p->pi_lock ->&x->wait ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 1 ....: &rs->lock FD: 185 BD: 6 ++++: &type->i_mutex_dir_key#3 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->namespace_sem ->&zone->lock ->&c->lock ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&n->list_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&____s->seqcount#2 ->stock_lock ->remove_cache_srcu ->&meta->lock ->rcu_node_0 ->&journal->j_wait_transaction_locked ->&rcu_state.gp_wq ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rcu_state.expedited_wq ->quarantine_lock ->batched_entropy_u32.lock ->&sem->wait_lock FD: 43 BD: 76 +.+.: rcu_state.barrier_mutex ->rcu_state.barrier_lock ->&x->wait#24 ->&rq->__lock ->&pool->lock ->rcu_state.barrier_mutex.wait_lock ->pgd_lock ->pool_lock#2 ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock ->pool_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->stock_lock FD: 29 BD: 77 ..-.: &x->wait#24 ->&p->pi_lock FD: 28 BD: 1 +.+.: (init_mm).mmap_lock ->&rq->__lock FD: 158 BD: 1 +.+.: &type->s_umount_key#29/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->&c->lock ->&zone->lock ->&____s->seqcount ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key#9 ->&dentry->d_lock FD: 43 BD: 4426 +.+.: &sb->s_type->i_lock_key#23 ->&dentry->d_lock ->bit_wait_table + i FD: 144 BD: 4 ++++: &sb->s_type->i_mutex_key#9 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->rename_lock.seqcount ->proc_subdir_lock ->sysctl_lock ->&zone->lock ->&____s->seqcount ->&obj_hash[i].lock ->&c->lock ->&p->alloc_lock ->&pid->lock ->namespace_sem ->tomoyo_ss ->remove_cache_srcu ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->&xa->xa_lock#4 ->stock_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&rcu_state.gp_wq ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 227 BD: 2 .+.+: sb_writers#3 ->mount_lock ->&sb->s_type->i_mutex_key#9 ->sysctl_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&h->resize_lock ->hugetlb_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#23 ->&wb->list_lock ->&dentry->d_lock ->tomoyo_ss ->&mm->mmap_lock ->oom_adj_mutex ->&c->lock ->&p->pi_lock ->&rq->__lock ->&____s->seqcount#11 ->&(&net->ipv4.ping_group_range.lock)->lock ->rcu_node_0 ->oom_adj_mutex.wait_lock ->remove_cache_srcu ->(console_sem).lock FD: 130 BD: 3 +.+.: &h->resize_lock ->free_hpage_work ->hugetlb_lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 1 BD: 4 +.+.: free_hpage_work FD: 2 BD: 151 ....: hugetlb_lock ->&____s->seqcount#2 FD: 170 BD: 155 ++++: mapping.invalidate_lock ->mmu_notifier_invalidate_range_start ->&zone->lock 
->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#8 ->lock#4 ->&ei->i_es_lock ->&ei->i_data_sem ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&c->lock ->rcu_node_0 ->&rq->__lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock ->&mapping->i_mmap_rwsem ->&journal->j_state_lock ->jbd2_handle ->stock_lock ->&sem->wait_lock ->&sb->s_type->i_lock_key#22 ->lock#5 ->&lruvec->lru_lock ->&____s->seqcount#2 ->&mapping->private_lock ->&folio_wait_table[i] ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&meta->lock ->kfence_freelist_lock ->&rcu_state.expedited_wq ->&p->pi_lock ->bit_wait_table + i ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->quarantine_lock ->&cfs_rq->removed.lock ->fs_reclaim ->&n->list_lock ->&journal->j_wait_transaction_locked ->batched_entropy_u8.lock ->remove_cache_srcu ->key#3 ->key#14 FD: 31 BD: 1 ..-.: &(&ovs_net->masks_rebalance)->timer FD: 874 BD: 6 +.+.: (work_completion)(&(&ovs_net->masks_rebalance)->work) ->ovs_mutex ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->ovs_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4413 ++++: integrity_iint_lock FD: 177 BD: 4 +.+.: &iint->mutex ->&ei->xattr_sem ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->mmu_notifier_invalidate_range_start ->ima_extend_list_mutex ->mapping.invalidate_lock ->&folio_wait_table[i] ->&rq->__lock ->&zone->lock ->&____s->seqcount ->&c->lock ->tk_core.seq.seqcount ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->rcu_node_0 ->&base->lock ->&____s->seqcount#2 ->remove_cache_srcu ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 46 BD: 12 .+.+: &ei->xattr_sem ->&mapping->private_lock ->&rq->__lock FD: 1 BD: 4 ++++: entries_lock FD: 228 BD: 2 +.+.: &sig->exec_update_lock ->&p->alloc_lock ->&sighand->siglock ->&newf->file_lock ->batched_entropy_u64.lock ->&mm->mmap_lock ->delayed_uprobe_lock ->&memcg->mm_list.lock ->pgd_lock ->pool_lock#2 ->&obj_hash[i].lock ->key ->pcpu_lock ->percpu_counters_lock ->quarantine_lock ->&rq->__lock ->pool_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 4413 +.+.: &memcg->mm_list.lock FD: 3 BD: 119 ..-.: batched_entropy_u16.lock ->crngs.lock FD: 29 BD: 4415 +.+.: ptlock_ptr(page)#2/1 FD: 127 BD: 1 ++++: &type->s_umount_key#30 ->shrinker_rwsem ->&dentry->d_lock ->rename_lock.seqcount ->&dentry->d_lock/1 ->&sb->s_type->i_lock_key#23 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->sysctl_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->&rq->__lock ->&lru->node[i].lock ->&pid->lock FD: 872 BD: 2 +.+.: (work_completion)(&map->work) ->&obj_hash[i].lock ->pool_lock#2 ->stock_lock ->rcu_node_0 ->&rq->__lock ->&htab->buckets[i].lock ->vmap_area_lock ->purge_vmap_area_lock ->pcpu_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->callchain_mutex ->&meta->lock ->kfence_freelist_lock ->&zone->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->pool_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->quarantine_lock ->percpu_counters_lock ->rtnl_mutex ->rtnl_mutex.wait_lock FD: 871 BD: 2 +.+.: (work_completion)(&aux->work) ->map_idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->pack_mutex ->pcpu_lock ->vmap_area_lock ->purge_vmap_area_lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&base->lock ->quarantine_lock ->&rq->__lock ->rcu_node_0 ->&meta->lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->rtnl_mutex ->pack_mutex.wait_lock ->&p->pi_lock ->rtnl_mutex.wait_lock FD: 1 BD: 4416 ....: key#2 FD: 922 BD: 3 +.+.: &p->lock ->fs_reclaim 
->pool_lock#2 ->&mm->mmap_lock ->&c->lock ->&____s->seqcount ->file_systems_lock ->namespace_sem ->&of->mutex ->&n->list_lock ->remove_cache_srcu ->&rq->__lock ->rcu_node_0 ->cpufreq_driver_lock ->module_mutex ->&____s->seqcount#2 ->stock_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->cgroup_mutex ->&obj_hash[i].lock ->&rcu_state.expedited_wq ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock FD: 144 BD: 1 +.+.: &type->s_umount_key#31/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 43 BD: 4430 +.+.: &sb->s_type->i_lock_key#24 ->&dentry->d_lock ->bit_wait_table + i FD: 148 BD: 3 ++++: &type->i_mutex_dir_key#4 ->fs_reclaim ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&dentry->d_lock ->rename_lock.seqcount ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#24 ->namespace_sem ->tk_core.seq.seqcount ->remove_cache_srcu ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->&obj_hash[i].lock ->&sem->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->rename_lock FD: 29 BD: 217 ....: &x->wait#25 ->&p->pi_lock FD: 56 BD: 14 +.+.: &net->unx.table.locks[i] ->&net->unx.table.locks[i]/1 ->&u->lock ->clock-AF_UNIX ->rlock-AF_UNIX FD: 1089 BD: 2 +.+.: &sb->s_type->i_mutex_key#10 ->&net->unx.table.locks[i] ->&u->lock ->&u->peer_wait ->rlock-AF_UNIX ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->nl_table_lock ->nl_table_wait.lock ->clock-AF_NETLINK ->genl_sk_destructing_waitq.lock ->&nlk->wait ->wlock-AF_NETLINK ->(netlink_chain).rwsem ->tomoyo_ss ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#8 ->&wb->list_lock ->&dentry->d_lock ->sk_lock-AF_INET ->slock-AF_INET ->clock-AF_INET ->sk_lock-AF_INET6 ->slock-AF_INET6 ->clock-AF_INET6 ->&table->hash[i].lock ->&net->packet.sklist_lock ->&po->bind_lock ->sk_lock-AF_PACKET ->slock-AF_PACKET ->fanout_mutex ->&rnp->exp_wq[3] ->clock-AF_PACKET ->rlock-AF_PACKET ->pcpu_lock ->elock-AF_PACKET ->&rnp->exp_wq[1] ->&rnp->exp_wq[0] ->&cfs_rq->removed.lock ->&rnp->exp_wq[2] ->quarantine_lock ->sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->hci_dev_list_lock ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->pool_lock ->stock_lock ->sk_lock-AF_INET6/1 ->&net->sctp.addr_wq_lock ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->&hashinfo->lock#2 ->sk_lock-AF_TIPC ->slock-AF_TIPC ->sk_lock-AF_INET/1 ->(work_completion)(&msk->work) ->clock-AF_NETROM ->sk_lock-AF_NETROM ->slock-AF_NETROM ->sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->clock-AF_BLUETOOTH ->sco_sk_list.lock ->hidp_sk_list.lock ->&match->lock ->l2cap_sk_list.lock ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&chan->lock/1 ->&conn->chan_lock ->chan_list_lock ->sk_lock-AF_CAN ->slock-AF_CAN ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->clock-AF_ROSE ->sk_lock-AF_ROSE ->slock-AF_ROSE ->wlock-AF_ROSE ->&list->lock#27 ->bcm_notifier_lock ->rlock-AF_CAN ->elock-AF_CAN ->clock-AF_RDS ->&rs->rs_recv_lock ->rds_cong_monitor_lock ->rds_cong_lock ->&rs->rs_lock ->&rs->rs_rdma_lock ->&q->lock ->rds_sock_lock ->pfkey_mutex ->clock-AF_KEY ->wlock-AF_KEY ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->rlock-AF_KEY ->&hashinfo->lock ->&bsd_socket_locks[i] ->sk_lock-AF_LLC ->slock-AF_LLC ->&dir->lock#2 ->(&llc->pf_cycle_timer.timer) ->&base->lock ->(&llc->ack_timer.timer) ->(&llc->rej_sent_timer.timer) ->(&llc->busy_state_timer.timer) ->rlock-AF_LLC ->wlock-AF_LLC 
->&list->lock#28 ->isotp_notifier_lock ->&net->xdp.lock ->&xs->map_list_lock ->&xs->mutex ->vmap_area_lock ->purge_vmap_area_lock ->clock-AF_XDP ->l2tp_ip6_lock ->&pnsocks.lock ->resource_mutex ->clock-AF_PHONET ->rlock-AF_PHONET ->dgram_lock ->clock-AF_IEEE802154 ->rlock-AF_IEEE802154 ->sk_lock-AF_UNIX ->slock-AF_UNIX ->(work_completion)(&(&psock->work)->work) ->clock-AF_UNIX ->&psock->ingress_lock ->(work_completion)(&(&sw_ctx_tx->tx_work.work)->work) ->(work_completion)(&strp->work) ->krc.lock ->(work_completion)(&smc->connect_work) ->sk_lock-AF_SMC ->slock-AF_SMC ->&smc->clcsock_release_lock ->&net->xfrm.xfrm_policy_lock ->&policy->lock ->&list->lock#31 ->sk_lock-AF_X25 ->slock-AF_X25 ->&____s->seqcount ->&net->ipv4.ra_mutex ->sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM ->slock-AF_BLUETOOTH-BTPROTO_RFCOMM ->rfcomm_sk_list.lock ->&d->lock ->&list->lock#34 ->sk_lock-AF_KCM ->slock-AF_KCM ->&mux->lock ->(work_completion)(&kcm->tx_work) ->&mux->rx_lock ->&knet->mutex ->clock-AF_RXRPC ->(wq_completion)krxrpcd ->&wq->mutex ->rlock-AF_RXRPC ->sk_lock-AF_PHONET ->slock-AF_PHONET ->&list->lock#35 ->llc_sap_list_lock ->clock-AF_NFC ->rlock-AF_NFC ->&list->lock#36 ->&m->lock ->map_idr_lock ->sk_lock-AF_PPPOX ->slock-AF_PPPOX ->cpu_hotplug_lock ->l2tp_ip_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->ip6_ra_lock ->rtnl_mutex ->raw_notifier_lock ->rtnl_mutex.wait_lock ->sk_lock-AF_AX25 ->slock-AF_AX25 ->&local->services_lock ->&rx->incoming_lock ->&x->wait ->sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->ip6_sk_fl_lock ->ip6_fl_lock ->(console_sem).lock ->base_sockets.lock ->clock-AF_ISDN ->raw_lock ->&rcu_state.expedited_wq ->(rxrpc_call_limiter).lock ->&rx->recvmsg_lock ->&rx->call_lock ->&rxnet->call_lock ->(&call->timer) ->&list->lock#21 ->&meta->lock ->kfence_freelist_lock ->(&timer.timer) ->rlock-AF_CAIF ->sk_lock-AF_CAIF ->slock-AF_CAIF ->elock-AF_CAIF ->rlock-AF_PPPOX ->wlock-AF_PPPOX ->crypto_default_null_skcipher_lock ->rfcomm_mutex ->rds_ib_devices_lock ->&c->lock ->sk_lock-AF_NFC ->slock-AF_NFC ->&ping_table.lock ->(work_completion)(&(&strp->msg_timer_work)->work) ->(work_completion)(&strp->work)#2 ->data_sockets.lock ->sk_lock-AF_ISDN ->slock-AF_ISDN ->raw_sk_list.lock ->nfnl_grp_active_lock ->&n->list_lock ->&rng->jent_lock FD: 54 BD: 21 +.+.: &u->lock ->clock-AF_UNIX ->&u->lock/1 ->&sk->sk_peer_lock ->rlock-AF_UNIX ->&u->peer_wait ->&ei->socket.wq.wait ->&f->f_owner.lock FD: 4 BD: 97 ++..: clock-AF_UNIX ->pool_lock#2 ->&obj_hash[i].lock FD: 34 BD: 22 +.+.: &u->peer_wait ->&p->pi_lock ->&ei->socket.wq.wait FD: 46 BD: 24 +.+.: rlock-AF_UNIX ->&u->lock/1 FD: 920 BD: 2 .+.+: sb_writers#4 ->mount_lock ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&wb->work_lock ->&type->i_mutex_dir_key#3 ->&type->i_mutex_dir_key#3/1 ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->&dd->lock ->bit_wait_table + i ->&sb->s_type->i_mutex_key#8 ->tomoyo_ss ->&sem->wait_lock ->&p->pi_lock ->&s->s_inode_list_lock ->sb_internal ->inode_hash_lock ->&fsnotify_mark_srcu ->stock_lock ->lock#5 ->&lruvec->lru_lock ->integrity_iint_lock ->&dentry->d_lock ->rcu_node_0 ->&ei->xattr_sem ->&iint->mutex ->fs_reclaim ->mapping.invalidate_lock ->&folio_wait_table[i] ->&cfs_rq->removed.lock ->&n->list_lock ->&____s->seqcount#2 ->&sbi->s_writepages_rwsem 
->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&fq->mq_flush_lock ->&x->wait#26 ->&base->lock ->(&timer.timer) ->remove_cache_srcu ->&sb->s_type->i_mutex_key#8/4 ->&journal->j_list_lock ->quarantine_lock ->&journal->j_wait_transaction_locked ->&rcu_state.expedited_wq ->(console_sem).lock ->console_owner_lock ->console_owner ->&journal->j_wait_reserved ->&journal->j_barrier ->&pipe->mutex/1 ->&pipe->rd_wait ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 1 BD: 4413 +.+.: &pid->lock FD: 1 BD: 27 +.+.: &new_ns->ns_lock FD: 201 BD: 1 ++++: &type->s_umount_key#32 ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#22 ->&obj_hash[i].lock ->pool_lock#2 ->&journal->j_state_lock ->&p->alloc_lock ->(work_completion)(&sbi->s_error_work) ->key#3 ->key#4 ->&sbi->s_error_lock ->mmu_notifier_invalidate_range_start ->&c->lock ->tk_core.seq.seqcount ->&base->lock ->&fq->mq_flush_lock ->&dd->lock ->bit_wait_table + i ->&rq->__lock ->ext4_li_mtx ->(console_sem).lock ->mount_lock ->&xa->xa_lock#8 ->&eli->li_list_mtx ->&wb->list_lock ->&sbi->s_writepages_rwsem ->rcu_node_0 ->&bdi->wb_waitq ->&cfs_rq->removed.lock ->&s->s_inode_list_lock ->&ei->i_es_lock ->inode_hash_lock ->&fsnotify_mark_srcu ->&ei->i_prealloc_lock ->integrity_iint_lock ->&journal->j_list_lock ->&rcu_state.expedited_wq FD: 1 BD: 2 +.+.: (work_completion)(&sbi->s_error_work) FD: 1 BD: 171 ....: key#3 FD: 1 BD: 165 ....: key#4 FD: 1 BD: 2 +.+.: &sbi->s_error_lock FD: 34 BD: 167 ..-.: &fq->mq_flush_lock ->tk_core.seq.seqcount ->&q->requeue_lock ->&obj_hash[i].lock ->bit_wait_table + i ->&x->wait#26 FD: 1 BD: 173 ..-.: &q->requeue_lock FD: 4 BD: 5 +.+.: &eli->li_list_mtx ->&obj_hash[i].lock ->pool_lock#2 FD: 167 BD: 164 ++++: jbd2_handle ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&ei->i_raw_lock ->&journal->j_wait_updates ->&mapping->private_lock ->&meta_group_info[i]->alloc_sem ->tk_core.seq.seqcount ->inode_hash_lock ->batched_entropy_u32.lock ->&ei->i_es_lock ->&sb->s_type->i_lock_key#22 ->&rq->__lock ->lock#4 ->lock#5 ->&obj_hash[i].lock ->&ei->i_data_sem ->&xa->xa_lock#8 ->&journal->j_state_lock ->bit_wait_table + i ->&sbi->s_orphan_lock ->&journal->j_list_lock ->&base->lock ->&dd->lock ->&rq_wait->wait ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->stock_lock ->&bgl->locks[i].lock ->&ei->i_prealloc_lock ->&(ei->i_block_reservation_lock) ->key#4 ->&lock->wait_lock ->&p->pi_lock ->&____s->seqcount#2 ->&folio_wait_table[i] ->&sem->wait_lock ->remove_cache_srcu ->&ei->i_data_sem/1 ->&n->list_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->hrtimer_bases.lock ->&journal->j_wait_reserved ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&wb->list_lock ->&meta->lock ->&lruvec->lru_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 73 BD: 169 +.+.: &ret->b_state_lock ->&journal->j_list_lock ->&obj_hash[i].lock ->bit_wait_table + i FD: 72 BD: 4419 +.+.: &journal->j_list_lock ->&sb->s_type->i_lock_key#3 ->&wb->list_lock ->&obj_hash[i].lock ->&c->lock ->pool_lock#2 ->key#15 ->&wb->work_lock ->&meta->lock ->kfence_freelist_lock ->&base->lock FD: 1 BD: 168 +.+.: &journal->j_revoke_lock FD: 1 BD: 168 +.+.: &ei->i_raw_lock FD: 29 BD: 169 ....: &journal->j_wait_updates ->&p->pi_lock FD: 33 BD: 4447 ..-.: &wb->work_lock ->&obj_hash[i].lock ->&base->lock FD: 53 BD: 165 ++++: &meta_group_info[i]->alloc_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&x->wait#26 
->&obj_hash[i].lock ->rcu_node_0 ->&rq->__lock ->&base->lock ->(&timer.timer) ->&fq->mq_flush_lock ->&bgl->locks[i].lock ->&cfs_rq->removed.lock FD: 172 BD: 4 .+.+: sb_internal ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->&c->lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->rcu_node_0 ->&n->list_lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&journal->j_wait_transaction_locked ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->&rcu_state.expedited_wq ->&base->lock FD: 2 BD: 4417 ++++: &ei->i_prealloc_lock ->&pa->pa_lock#2 FD: 31 BD: 1 .+.+: file_rwsem ->&ctx->flc_lock ->&rq->__lock ->rcu_node_0 FD: 2 BD: 2 +.+.: &ctx->flc_lock ->&fll->lock FD: 1 BD: 3 +.+.: &fll->lock FD: 234 BD: 3 +.+.: &type->i_mutex_dir_key#3/1 ->rename_lock.seqcount ->&dentry->d_lock ->fs_reclaim ->&ei->i_es_lock ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#8 ->lock#4 ->&mapping->private_lock ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->bit_wait_table + i ->&rq->__lock ->inode_hash_lock ->&journal->j_state_lock ->&sb->s_type->i_lock_key#22 ->tomoyo_ss ->&s->s_inode_list_lock ->&ei->xattr_sem ->jbd2_handle ->&c->lock ->&sb->s_type->i_mutex_key#8 ->&sem->wait_lock ->&n->list_lock ->&____s->seqcount#2 ->&xa->xa_lock#4 ->stock_lock ->&fsnotify_mark_srcu ->&type->i_mutex_dir_key#3 ->&wb->list_lock ->sb_internal ->rcu_node_0 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->remove_cache_srcu ->&rcu_state.gp_wq ->&u->bindlock ->&journal->j_wait_transaction_locked ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&meta->lock ->quarantine_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 141 BD: 1 +.+.: &type->s_umount_key#33/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->&____s->seqcount ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&sb->s_type->i_mutex_key#11 ->&dentry->d_lock FD: 42 BD: 3 +.+.: &sb->s_type->i_lock_key#25 ->&dentry->d_lock FD: 129 BD: 2 +.+.: &sb->s_type->i_mutex_key#11 ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#25 ->&s->s_inode_list_lock ->tk_core.seq.seqcount FD: 130 BD: 1 +.+.: &type->s_umount_key#34 ->sb_lock ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->&lru->node[i].lock ->&obj_hash[i].lock FD: 44 BD: 1 +.+.: &type->s_umount_key#35 ->sb_lock ->&dentry->d_lock FD: 140 BD: 1 +.+.: &type->s_umount_key#36/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&dentry->d_lock ->&c->lock ->&____s->seqcount FD: 42 BD: 4 +.+.: &sb->s_type->i_lock_key#26 ->&dentry->d_lock FD: 1 BD: 1 +.+.: redirect_lock FD: 325 BD: 1 +.+.: &tty->atomic_write_lock ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&tty->termios_rwsem ->&tty->files_lock FD: 37 BD: 7 +.+.: &ldata->output_lock ->&port_lock_key ->&rq->__lock FD: 140 BD: 1 +.+.: &type->s_umount_key#37/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 
->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#27 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->fuse_mutex ->&dentry->d_lock FD: 42 BD: 4426 +.+.: &sb->s_type->i_lock_key#27 ->&dentry->d_lock FD: 1 BD: 2 +.+.: fuse_mutex FD: 141 BD: 1 +.+.: &type->s_umount_key#38/1 ->fs_reclaim ->batched_entropy_u8.lock ->kfence_freelist_lock ->pcpu_alloc_mutex ->shrinker_rwsem ->pool_lock#2 ->list_lrus_mutex ->sb_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#28 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pstore_sb_lock ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#28 ->&dentry->d_lock FD: 1 BD: 2 +.+.: pstore_sb_lock FD: 144 BD: 1 +.+.: &type->s_umount_key#39/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#29 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->bpf_preload_lock ->&dentry->d_lock FD: 42 BD: 2 +.+.: &sb->s_type->i_lock_key#29 ->&dentry->d_lock FD: 131 BD: 2 +.+.: bpf_preload_lock ->(kmod_concurrent_max).lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#17 ->&rq->__lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->running_helpers_waitq.lock FD: 15 BD: 1 +.-.: (&cb->timer) ->&obj_hash[i].lock ->&base->lock ->tk_core.seq.seqcount FD: 29 BD: 1 ++++: uts_sem ->hostname_poll.wait.lock ->&rq->__lock FD: 125 BD: 3 ++++: &type->i_mutex_dir_key#5 ->fs_reclaim ->&dentry->d_lock ->rename_lock.seqcount ->tomoyo_ss ->&sbinfo->stat_lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->&dentry->d_lock/1 ->&sem->wait_lock ->rcu_node_0 ->&rq->__lock ->&rcu_state.gp_wq ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq FD: 150 BD: 2 .+.+: sb_writers#5 ->mount_lock ->&type->i_mutex_dir_key#5 ->&type->i_mutex_dir_key#5/1 ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key ->&wb->list_lock ->&sb->s_type->i_mutex_key#12 ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->&s->s_inode_list_lock ->&info->lock ->&sbinfo->stat_lock ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->tomoyo_ss ->&xattrs->lock ->fs_reclaim ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&dentry->d_lock ->&c->lock ->&____s->seqcount ->rcu_node_0 ->&rcu_state.expedited_wq FD: 126 BD: 4 +.+.: &sb->s_type->i_mutex_key#12 ->&xattrs->lock ->tk_core.seq.seqcount ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#8 ->lock#4 ->&info->lock ->&sb->s_type->i_lock_key ->&wb->list_lock ->key#9 ->&dentry->d_lock ->rename_lock ->&sb->s_type->i_mutex_key#12/4 ->&rq->__lock ->tomoyo_ss ->&mapping->i_mmap_rwsem ->lock#5 ->&lruvec->lru_lock ->&obj_hash[i].lock ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 146 BD: 3 +.+.: &type->i_mutex_dir_key#5/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->tomoyo_ss ->&sbinfo->stat_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->batched_entropy_u32.lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&u->bindlock ->&sb->s_type->i_mutex_key#12 ->&dentry->d_lock/1 ->&sem->wait_lock ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->&fsnotify_mark_srcu ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&info->lock 
->&xa->xa_lock#8 ->rcu_node_0 ->&____s->seqcount#2 ->&sb->s_type->i_mutex_key#12/4 ->&cfs_rq->removed.lock ->&rcu_state.gp_wq ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&p->pi_lock FD: 7 BD: 81 +.+.: &f->f_lock ->fasync_lock FD: 1 BD: 2 ....: hostname_poll.wait.lock FD: 996 BD: 1 +.+.: &f->f_pos_lock ->&type->i_mutex_dir_key#3 ->&mm->mmap_lock ->&sb->s_type->i_mutex_key#9 ->&type->i_mutex_dir_key#2 ->&type->i_mutex_dir_key#4 ->&type->i_mutex_dir_key#5 ->rcu_node_0 ->&rq->__lock ->sb_writers#5 ->&p->lock ->sb_writers#3 ->sb_writers#4 ->&lock->wait_lock ->&sb->s_type->i_mutex_key#18 ->sb_writers#11 ->sb_writers#10 ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&rcu_state.expedited_wq ->(console_sem).lock ->console_owner_lock ->console_owner ->tk_core.seq.seqcount ->fs_reclaim ->pool_lock#2 ->&c->lock FD: 213 BD: 1 .+.+: dup_mmap_sem ->&mm->mmap_lock ->&rq->__lock FD: 134 BD: 147 +.+.: &mm->mmap_lock/1 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&vma->vm_lock->lock ->fs_reclaim ->&mapping->i_mmap_rwsem ->&anon_vma->rwsem ->mmu_notifier_invalidate_range_start ->&mm->page_table_lock ->ptlock_ptr(page) ->ptlock_ptr(page)#2 ->&mm->context.lock ->&obj_hash[i].lock ->&zone->lock ->&rq->__lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->remove_cache_srcu ->&sem->wait_lock ->&p->pi_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->quarantine_lock ->&rcu_state.expedited_wq ->stock_lock ->&____s->seqcount#2 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->key#24 ->&meta->lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&base->lock ->lock#10 FD: 28 BD: 148 +.+.: &mm->context.lock ->&rq->__lock FD: 1 BD: 10 .+.+: &xattrs->lock FD: 145 BD: 8 +.+.: &u->bindlock ->&net->unx.table.locks[i] ->&bsd_socket_locks[i] ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->&net->unx.table.locks[i]/1 FD: 42 BD: 15 +.+.: &net->unx.table.locks[i]/1 ->&dentry->d_lock FD: 1 BD: 11 +.+.: &bsd_socket_locks[i] FD: 228 BD: 7 +.+.: &u->iolock ->rlock-AF_UNIX ->&mm->mmap_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->&u->peer_wait ->quarantine_lock ->rcu_node_0 ->&base->lock ->&u->lock ->&____s->seqcount ->&dir->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->unix_gc_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock FD: 33 BD: 4075 ..-.: &ei->socket.wq.wait ->&p->pi_lock ->&ep->lock ->&ep->poll_wait/1 FD: 1 BD: 2 ....: key#5 FD: 1 BD: 4489 ....: &wq#3 FD: 45 BD: 25 +.+.: &u->lock/1 ->&sk->sk_peer_lock ->&dentry->d_lock ->&sk->sk_peer_lock/1 ->clock-AF_UNIX FD: 143 BD: 1 +.+.: &group->mark_mutex ->&fsnotify_mark_srcu ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock ->ucounts_lock ->&mark->lock ->&conn->lock ->&sb->s_type->i_lock_key#5 ->&sb->s_type->i_lock_key#22 ->&sb->s_type->i_lock_key ->&____s->seqcount#2 ->&rq->__lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->&n->list_lock ->rcu_node_0 ->&lock->wait_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 13 BD: 248 +.+.: &group->inotify_data.idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 3 BD: 2 +.+.: &mark->lock ->&fsnotify_mark_srcu ->&conn->lock FD: 1 BD: 7 +.+.: &conn->lock FD: 1 BD: 1 +.+.: &evdev->client_lock FD: 223 BD: 1 +.+.: &evdev->mutex ->&dev->mutex#2 ->&mm->mmap_lock FD: 226 BD: 4 +.+.: sk_lock-AF_NETLINK ->slock-AF_NETLINK ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 
->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->pcpu_alloc_mutex ->&obj_hash[i].lock ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->rcu_node_0 ->&rq->__lock ->&c->lock ->stock_lock ->&n->list_lock FD: 1 BD: 5 +...: slock-AF_NETLINK FD: 1 BD: 4034 ..-.: rlock-AF_NETLINK FD: 1 BD: 7 ....: &nlk->wait FD: 134 BD: 78 +.+.: (work_completion)(&ht->run_work) ->&ht->mutex ->&rq->__lock FD: 133 BD: 79 +.+.: &ht->mutex ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->rhashtable_bucket ->&ht->lock ->&c->lock ->&n->list_lock ->remove_cache_srcu ->&rq->__lock ->&obj_hash[i].lock ->quarantine_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->&zone->lock FD: 1 BD: 3846 ....: rhashtable_bucket/1 FD: 12 BD: 88 +.+.: &ht->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3 +...: clock-AF_NETLINK FD: 1 BD: 7 ....: genl_sk_destructing_waitq.lock FD: 1 BD: 7 ....: wlock-AF_NETLINK FD: 173 BD: 2 +.+.: (work_completion)(&w->w) ->nfc_devlist_mutex ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&base->lock FD: 1 BD: 4 +.+.: &genl_data->genl_data_mutex FD: 1 BD: 4 +...: &rdev->beacon_registrations_lock FD: 1 BD: 77 +.-.: &rdev->mgmt_registrations_lock FD: 1 BD: 4 +...: &wdev->pmsr_lock FD: 1 BD: 73 +.+.: reg_indoor_lock FD: 1094 BD: 1 .+.+: sb_writers#6 ->mount_lock ->&sb->s_type->i_mutex_key#10 FD: 2 BD: 26 +.+.: &sk->sk_peer_lock ->&sk->sk_peer_lock/1 FD: 32 BD: 7 ....: &group->notification_waitq ->&p->pi_lock ->&ep->lock FD: 1 BD: 7 +.+.: &group->notification_lock FD: 1 BD: 1 ....: &client->wait FD: 907 BD: 3 +.+.: &pipe->mutex/1 ->&pipe->rd_wait ->&rq->__lock ->&lock->wait_lock ->&pipe->wr_wait ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&mm->mmap_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->rcu_node_0 ->stock_lock ->sk_lock-AF_NETLINK ->slock-AF_NETLINK ->free_vmap_area_lock ->vmap_area_lock ->init_mm.page_table_lock ->&c->lock ->nfnl_subsys_nftables ->&nft_net->commit_mutex ->purge_vmap_area_lock ->&sighand->siglock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->sk_lock-AF_INET6 ->slock-AF_INET6 ->&u->iolock ->&ei->socket.wq.wait ->&pipe->mutex#2/2 ->&p->pi_lock ->&____s->seqcount#2 ->&f->f_lock ->&sb->s_type->i_mutex_key#8 ->rtnl_mutex ->rtnl_mutex.wait_lock ->rlock-AF_NETLINK ->nfnl_subsys_ctnetlink ->&n->list_lock ->remove_cache_srcu ->&u->lock FD: 32 BD: 6 ....: &pipe->rd_wait ->&p->pi_lock ->&ep->lock FD: 1 BD: 4416 ....: key#6 FD: 1 BD: 4416 ....: key#7 FD: 1 BD: 4416 ....: key#8 FD: 1 BD: 4413 ....: &sem->wait_lock FD: 29 BD: 6 ....: &pipe->wr_wait ->&p->pi_lock FD: 48 BD: 1 .+.+: sb_writers#7 ->tk_core.seq.seqcount ->mount_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 1 BD: 174 ....: key#9 FD: 33 BD: 4489 +.+.: &dentry->d_lock/2 ->&dentry->d_lock/3 FD: 32 BD: 4490 +.+.: &dentry->d_lock/3 ->&____s->seqcount#4 ->&wq FD: 1 BD: 4492 +.+.: &____s->seqcount#4/1 FD: 260 BD: 3 +.+.: sk_lock-AF_UNIX ->slock-AF_UNIX ->&c->lock ->&obj_hash[i].lock ->&psock->ingress_lock ->reuseport_lock ->&mm->mmap_lock ->fs_reclaim ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->stock_lock ->pcpu_alloc_mutex ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->&u->iolock FD: 1 BD: 4 +...: slock-AF_UNIX FD: 1 BD: 1 ....: &rs->lock#2 FD: 29 BD: 168 ..-.: &x->wait#26 ->&p->pi_lock FD: 56 BD: 3 
+.+.: oom_adj_mutex ->&p->alloc_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->oom_adj_mutex.wait_lock FD: 246 BD: 2 +.+.: &ep->mtx ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&f->f_lock ->&ei->socket.wq.wait ->&ep->lock ->&group->notification_waitq ->&group->notification_lock ->&sighand->signalfd_wqh ->&sighand->siglock ->&rq->__lock ->&pipe->rd_wait ->&obj_hash[i].lock ->key#11 ->remove_cache_srcu ->&lock->wait_lock ->&pipe->wr_wait ->&p->pi_lock ->stock_lock ->wakeup_ida.xa_lock ->&x->wait#9 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->uevent_sock_mutex ->uevent_sock_mutex.wait_lock ->subsys mutex#15 ->events_lock ->&dentry->d_lock ->&u->lock ->&ws->lock ->&ACCESS_PRIVATE(sdp, lock) ->wakeup_srcu ->&x->wait#3 ->(&ws->timer) ->&base->lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start ->deleted_ws.lock ->&____s->seqcount#2 ->rcu_node_0 ->&n->list_lock ->&sem->wait_lock ->&rcu_state.expedited_wq ->quarantine_lock ->&cfs_rq->removed.lock ->&ep->mtx/1 ->&ep->poll_wait ->rlock-AF_PACKET ->wlock-AF_PACKET ->kernfs_idr_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->gdp_mutex.wait_lock FD: 247 BD: 1 +.+.: epnested_mutex ->&ep->mtx ->&ep->mtx/1 FD: 31 BD: 4083 ...-: &ep->lock ->&ep->wq ->&ws->lock FD: 32 BD: 156 ....: &sighand->signalfd_wqh ->&p->pi_lock ->&ep->lock FD: 922 BD: 1 .+.+: sb_writers#8 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#24 ->&wb->list_lock ->&type->i_mutex_dir_key#4 ->fs_reclaim ->pool_lock#2 ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&rq->__lock ->&c->lock ->&n->list_lock ->rcu_node_0 ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->&sb->s_type->i_mutex_key#13 ->iattr_mutex ->&xattrs->lock ->&____s->seqcount#2 ->&____s->seqcount ->&cfs_rq->removed.lock FD: 3 BD: 11 +.+.: swap_lock ->&p->lock#2 FD: 142 BD: 1 .+.+: kn->active ->fs_reclaim ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&k->list_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->remove_cache_srcu FD: 130 BD: 74 +.+.: &kernfs_locks->open_file_mutex[count] ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount ->&rq->__lock ->&n->list_lock ->remove_cache_srcu ->&lock->wait_lock ->&____s->seqcount#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 130 BD: 2 +.+.: &sb->s_type->i_mutex_key#13 ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->&sem->wait_lock ->&p->pi_lock ->&rq->__lock FD: 914 BD: 6 +.+.: &of->mutex ->&rq->__lock ->cgroup_mutex ->cpu_hotplug_lock ->cpuset_hotplug_work FD: 29 BD: 4084 ..-.: &ep->wq ->&p->pi_lock FD: 141 BD: 1 .+.+: kn->active#2 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&rq->__lock ->quarantine_lock ->remove_cache_srcu FD: 35 BD: 2 +.+.: (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) ->krc.lock ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 141 BD: 1 .+.+: kn->active#3 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&n->list_lock ->&rq->__lock ->quarantine_lock ->remove_cache_srcu FD: 135 BD: 1 .+.+: kn->active#4 ->fs_reclaim 
->&kernfs_locks->open_file_mutex[count] ->param_lock ->&c->lock ->&n->list_lock ->pool_lock#2 ->&on->poll ->remove_cache_srcu FD: 128 BD: 256 +.+.: iattr_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->tk_core.seq.seqcount ->&rq->__lock FD: 1 BD: 74 +.+.: disk_events_mutex FD: 168 BD: 1 .+.+: kn->active#5 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->uevent_sock_mutex ->&obj_hash[i].lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&rq->__lock ->remove_cache_srcu ->&device->physical_node_lock ->udc_lock ->&zone->lock ->fw_lock ->quarantine_lock ->&rfkill->lock ->&cfs_rq->removed.lock ->&lock->wait_lock ->&p->pi_lock ->&meta->lock ->rcu_node_0 ->&____s->seqcount#2 ->uevent_sock_mutex.wait_lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&rcu_state.expedited_wq FD: 131 BD: 1 .+.+: kn->active#6 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 131 BD: 1 .+.+: kn->active#7 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 131 BD: 1 .+.+: kn->active#8 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#9 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 131 BD: 1 .+.+: kn->active#10 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 131 BD: 1 .+.+: kn->active#11 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 131 BD: 1 .+.+: kn->active#12 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 131 BD: 1 .+.+: kn->active#13 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 131 BD: 1 .+.+: kn->active#14 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount FD: 1 BD: 126 +.+.: rcu_state.exp_mutex.wait_lock FD: 133 BD: 1 .+.+: kn->active#15 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&____s->seqcount ->&____s->seqcount#2 FD: 133 BD: 1 .+.+: kn->active#16 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&n->list_lock FD: 131 BD: 1 .+.+: kn->active#17 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 133 BD: 1 .+.+: kn->active#18 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&c->lock ->&n->list_lock ->&rq->__lock FD: 133 BD: 1 .+.+: kn->active#19 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 133 BD: 1 .+.+: kn->active#20 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->dev_base_lock FD: 133 BD: 1 .+.+: kn->active#21 ->fs_reclaim ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] ->&dev->power.lock ->pci_lock FD: 2 BD: 8 ....: pci_lock ->pci_config_lock FD: 131 BD: 1 .+.+: kn->active#22 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#23 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#24 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#25 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#26 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#27 ->fs_reclaim ->remove_cache_srcu ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount 
->pool_lock#2 FD: 131 BD: 1 .+.+: kn->active#28 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 131 BD: 1 .+.+: kn->active#29 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&____s->seqcount FD: 131 BD: 1 .+.+: kn->active#30 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount FD: 131 BD: 1 .+.+: kn->active#31 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&rq->__lock ->&____s->seqcount FD: 131 BD: 1 .+.+: kn->active#32 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->&n->list_lock ->&____s->seqcount FD: 131 BD: 1 .+.+: kn->active#33 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock ->&____s->seqcount FD: 1 BD: 1 +.+.: &mousedev->client_lock FD: 45 BD: 8 +.+.: &mousedev->mutex#2 ->&dev->mutex#2 FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#14 FD: 57 BD: 1 .+.+: mapping.invalidate_lock#2 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&xa->xa_lock#8 ->lock#4 ->tk_core.seq.seqcount ->&dd->lock ->&rq->__lock ->&c->lock FD: 46 BD: 5 +.+.: &sb->s_type->i_mutex_key#12/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock ->&rq->__lock FD: 131 BD: 1 .+.+: kn->active#34 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#35 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#36 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#37 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu FD: 132 BD: 1 .+.+: kn->active#38 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->i2c_dev_list_lock FD: 133 BD: 2 +.+.: &dev_instance->mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->vicodec_core:1844:(hdl)->_lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock ->&obj_hash[i].lock FD: 4 BD: 3 +.+.: vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 5 ....: &vdev->fh_lock FD: 138 BD: 1 +.+.: &mdev->req_queue_mutex ->&dev_instance->mutex ->&vdev->fh_lock ->&mdev->graph_mutex ->vicodec_core:1844:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&dev->dev_mutex ->&dev->mutex#3 FD: 1 BD: 4 ....: &m2m_dev->job_spinlock FD: 1 BD: 4 ....: &q->done_wq FD: 1 BD: 4 +.+.: &q->mmap_lock FD: 131 BD: 1 .+.+: kn->active#39 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&n->list_lock FD: 1 BD: 1 +.+.: fh->state->lock FD: 133 BD: 2 +.+.: &dev->dev_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->vim2m:1183:(hdl)->_lock ->&c->lock ->&obj_hash[i].lock ->&vdev->fh_lock ->&m2m_dev->job_spinlock ->&q->done_wq ->&q->mmap_lock FD: 4 BD: 3 +.+.: vim2m:1183:(hdl)->_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 27 +.+.: &sk->sk_peer_lock/1 FD: 1 BD: 1 +.+.: &vcapture->lock FD: 33 BD: 1 ..-.: &(&wb->dwork)->timer FD: 185 BD: 1 +.+.: (wq_completion)writeback ->(work_completion)(&(&wb->dwork)->work) ->(work_completion)(&(&wb->bw_dwork)->work) FD: 183 BD: 2 +.+.: (work_completion)(&(&wb->dwork)->work) ->&wb->work_lock ->&wb->list_lock ->&p->sequence ->key#10 ->&sb->s_type->i_lock_key#22 ->&sbi->s_writepages_rwsem ->pool_lock#2 ->&dd->lock ->&obj_hash[i].lock ->&pl->lock ->&rq->__lock ->&bdi->wb_waitq FD: 2 BD: 4 +.-.: &p->sequence ->key#13 FD: 1 BD: 4447 ..-.: key#10 FD: 29 BD: 1 +.-.: (&journal->j_commit_timer) ->&p->pi_lock FD: 116 BD: 4 +.+.: &journal->j_checkpoint_mutex 
->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&base->lock ->bit_wait_table + i ->&rq->__lock ->&journal->j_state_lock ->&fq->mq_flush_lock ->&x->wait#26 ->&journal->j_list_lock ->(&timer.timer) ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rcu_node_0 ->&____s->seqcount ->&ei->i_es_lock ->&mapping->private_lock ->&meta->lock ->&sb->s_type->i_lock_key#3 ->lock#4 ->lock#5 ->&lruvec->lru_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->&n->list_lock FD: 29 BD: 171 ....: &journal->j_wait_transaction_locked ->&p->pi_lock FD: 1 BD: 4429 ..-.: &memcg->move_lock FD: 1 BD: 171 +.+.: &sbi->s_md_lock FD: 1 BD: 1 ....: &journal->j_fc_wait FD: 1 BD: 1 +.+.: &journal->j_history_lock FD: 2 BD: 2 +.+.: &dev->mutex#3 ->&vdev->fh_lock FD: 1 BD: 3 ....: key#11 FD: 44 BD: 4 +.+.: &sb->s_type->i_mutex_key#4/4 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock FD: 31 BD: 1 ..-.: drivers/base/dd.c:321 FD: 39 BD: 2 +.+.: (deferred_probe_timeout_work).work ->device_links_lock ->deferred_probe_mutex ->deferred_probe_work ->&x->wait#10 ->&pool->lock ->&rq->__lock ->&obj_hash[i].lock FD: 180 BD: 12 ++++: &sbi->s_writepages_rwsem ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->lock#4 ->lock#5 ->&obj_hash[i].lock ->&journal->j_state_lock ->jbd2_handle ->tk_core.seq.seqcount ->&dd->lock ->&base->lock ->&rq_wait->wait ->rcu_node_0 ->&rq->__lock ->&xa->xa_lock#8 ->&____s->seqcount#2 ->&rcu_state.expedited_wq ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rsp->gp_wait ->&ei->i_data_sem ->&rnp->exp_lock ->rcu_state.exp_mutex ->&ext4__ioend_wq[i] ->remove_cache_srcu ->&n->list_lock ->&folio_wait_table[i] ->&journal->j_wait_transaction_locked ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&mapping->private_lock ->&cfs_rq->removed.lock ->&meta->lock ->quarantine_lock FD: 84 BD: 1 .+.+: &type->s_umount_key#40 ->&sb->s_type->i_lock_key#3 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->tk_core.seq.seqcount ->&dd->lock ->&obj_hash[i].lock ->&base->lock ->&c->lock ->lock#4 ->lock#5 ->&wb->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->lock#11 FD: 1 BD: 4447 ..-.: &s->s_inode_wblist_lock FD: 1 BD: 4448 ..-.: key#12 FD: 33 BD: 1 ..-.: &(&wb->bw_dwork)->timer FD: 69 BD: 2 +.+.: (work_completion)(&(&wb->bw_dwork)->work) ->&wb->list_lock FD: 131 BD: 1 .+.+: kn->active#40 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#41 ->fs_reclaim ->&c->lock ->&n->list_lock ->&kernfs_locks->open_file_mutex[count] FD: 131 BD: 1 .+.+: kn->active#42 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 28 BD: 5 +.+.: &lo->lo_mutex ->&rq->__lock FD: 58 BD: 10 +.+.: &nbd->config_lock ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&bdev->bd_size_lock ->&q->queue_lock ->&ACCESS_PRIVATE(sdp, lock) ->set->srcu ->&obj_hash[i].lock ->&rq->__lock ->&x->wait#3 ->&c->lock ->(console_sem).lock ->&____s->seqcount#2 ->&____s->seqcount FD: 32 BD: 8 ....: &ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&ACCESS_PRIVATE(sdp, lock) FD: 131 BD: 1 .+.+: kn->active#43 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 2 BD: 4 +.+.: &new->lock ->&mtdblk->cache_mutex FD: 1 BD: 5 +.+.: &mtdblk->cache_mutex FD: 131 BD: 1 .+.+: kn->active#44 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 213 BD: 1 +.+.: &mtd->master.chrdev_lock ->&mm->mmap_lock FD: 131 BD: 1 .+.+: kn->active#45 ->fs_reclaim 
->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 15 BD: 1 +.-.: (&dom->period_timer) ->key#13 ->&p->sequence ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4449 ..-.: key#13 FD: 1 BD: 4 +.+.: destroy_lock FD: 33 BD: 1 ..-.: fs/notify/mark.c:89 FD: 139 BD: 2 +.+.: connector_reaper_work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->&____s->seqcount ->pool_lock#2 ->&cfs_rq->removed.lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->rcu_node_0 ->&rcu_state.expedited_wq ->&base->lock ->pool_lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 139 BD: 2 +.+.: (reaper_work).work ->destroy_lock ->&ACCESS_PRIVATE(sdp, lock) ->&fsnotify_mark_srcu ->&obj_hash[i].lock ->&x->wait#3 ->&rq->__lock ->pool_lock#2 ->pool_lock ->&ACCESS_PRIVATE(ssp->srcu_sup, lock) ->&cfs_rq->removed.lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 31 BD: 1 ..-.: &(&tbl->gc_work)->timer FD: 54 BD: 2 +.+.: (work_completion)(&(&tbl->gc_work)->work) ->&tbl->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 1 +.+.: userns_state_mutex FD: 4 BD: 73 +...: fib_info_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 131 BD: 77 +...: &net->sctp.local_addr_lock ->&net->sctp.addr_wq_lock FD: 130 BD: 79 +.-.: &net->sctp.addr_wq_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->&____s->seqcount#2 ->slock-AF_INET6/1 ->slock-AF_INET/1 ->k-slock-AF_INET6/1 FD: 1 BD: 72 +...: _xmit_LOOPBACK FD: 28 BD: 79 .+.+: netpoll_srcu ->&rq->__lock FD: 16 BD: 85 +.-.: &in_dev->mc_tomb_lock ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->&n->list_lock ->&zone->lock FD: 19 BD: 85 +.-.: &im->lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->&zone->lock ->&obj_hash[i].lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount FD: 1 BD: 79 +.+.: cbs_list_lock FD: 31 BD: 76 +...: &net->ipv6.addrconf_hash_lock ->&obj_hash[i].lock ->&base->lock FD: 32 BD: 3875 +...: &ifa->lock ->batched_entropy_u32.lock ->crngs.lock ->&obj_hash[i].lock ->&base->lock FD: 54 BD: 3881 +...: &tb->tb6_lock ->&net->ipv6.fib6_walker_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->nl_table_lock ->&obj_hash[i].lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->rt6_exception_lock ->&n->list_lock ->&zone->lock ->&data->fib_event_queue_lock ->quarantine_lock ->&____s->seqcount#2 ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->(console_sem).lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 3882 ++..: &net->ipv6.fib6_walker_lock FD: 446 BD: 76 +.+.: sk_lock-AF_INET ->slock-AF_INET ->&table->hash[i].lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->&queue->rskq_lock ->clock-AF_INET ->&obj_hash[i].lock ->&base->lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&mm->mmap_lock ->tk_core.seq.seqcount ->&sd->defer_lock ->&meta->lock ->kfence_freelist_lock ->&rq->__lock ->batched_entropy_u8.lock ->mmu_notifier_invalidate_range_start ->&hashinfo->ehash_locks[i] ->elock-AF_INET ->remove_cache_srcu ->&dir->lock#2 ->rcu_node_0 ->&n->list_lock ->&rcu_state.expedited_wq ->&____s->seqcount#8 ->once_mutex ->&pool->lock ->batched_entropy_u32.lock ->batched_entropy_u16.lock ->&ei->socket.wq.wait ->quarantine_lock ->&sctp_port_hashtable[i].lock ->crngs.lock ->&____s->seqcount#2 ->&list->lock#5 ->&asoc->wait ->krc.lock ->sctp_assocs_id_lock ->&list->lock#18 
->cpu_hotplug_lock ->&msk->pm.lock ->stock_lock ->&psock->ingress_lock ->(&tw->tw_timer) ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET/1 ->k-slock-AF_INET ->k-clock-AF_INET ->k-sk_lock-AF_INET ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&token_hash[i].lock ->&cfs_rq->removed.lock ->&in_dev->mc_tomb_lock ->&im->lock ->_xmit_ETHER ->&f->f_owner.lock ->&dccp_hashinfo.bhash[i].lock ->&sighand->siglock ->&mux->lock ->prog_idr_lock ->bpf_lock ->free_vmap_area_lock ->vmap_area_lock ->pcpu_alloc_mutex ->pack_mutex ->text_mutex ->&fp->aux->used_maps_mutex ->hrtimer_bases.lock ->(console_sem).lock ->key#27 FD: 93 BD: 92 +.-.: slock-AF_INET ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->tk_core.seq.seqcount ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->pool_lock#2 ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->elock-AF_INET ->&sk->sk_lock.wq ->&n->list_lock ->batched_entropy_u32.lock ->&____s->seqcount#2 ->key#23 FD: 12 BD: 124 ++-.: clock-AF_INET ->&c->lock ->&n->list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&mux->rx_lock FD: 507 BD: 74 +.+.: sk_lock-AF_INET6 ->slock-AF_INET6 ->&table->hash[i].lock ->&____s->seqcount#8 ->batched_entropy_u32.lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->batched_entropy_u16.lock ->&tcp_hashinfo.bhash[i].lock ->&h->lhash2[i].lock ->fs_reclaim ->&mm->mmap_lock ->once_lock ->&pool->lock ->rcu_node_0 ->&rq->__lock ->tk_core.seq.seqcount ->clock-AF_INET6 ->stock_lock ->remove_cache_srcu ->&msk->pm.lock ->elock-AF_INET6 ->&queue->rskq_lock ->&base->lock ->&hashinfo->ehash_locks[i] ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->k-clock-AF_INET6 ->crngs.lock ->&token_hash[i].lock ->k-sk_lock-AF_INET6 ->&ei->socket.wq.wait ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&sctp_port_hashtable[i].lock ->krc.lock ->ip6_fl_lock ->cpu_hotplug_lock ->l2tp_ip6_lock ->&zone->lock ->tcpv6_prot_mutex ->device_spinlock ->&n->list_lock ->crypto_alg_sem ->(kmod_concurrent_max).lock ->&x->wait#17 ->lock ->&asoc->wait ->running_helpers_waitq.lock ->(crypto_chain).rwsem ->&x->wait#21 ->(&timer.timer) ->&sw_ctx_tx->encrypt_compl_lock ->&net->xfrm.xfrm_policy_lock ->&____s->seqcount#14 ->&dccp_hashinfo.bhash[i].lock ->&list->lock#5 ->&____s->seqcount#2 ->wlock-AF_INET6 ->(console_sem).lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->sctp_assocs_id_lock ->&list->lock#18 ->&meta->lock ->&f->f_owner.lock ->tcpv4_prot_mutex ->ip6_sk_fl_lock ->hrtimer_bases.lock ->&sd->defer_lock ->free_vmap_area_lock ->vmap_area_lock ->pcpu_alloc_mutex ->pack_mutex ->text_mutex ->&fp->aux->used_maps_mutex ->reuseport_lock ->&sighand->siglock ->&ndev->lock ->&sctp_ep_hashtable[i].lock ->sk_lock-AF_INET6/1 ->&newf->file_lock ->tcp_md5sig_mutex ->&sem->wait_lock ->&p->pi_lock ->acaddr_hash_lock ->&tb->tb6_lock ->&idev->mc_lock ->&f->f_lock ->&ping_table.lock ->key#27 FD: 100 BD: 101 +.-.: slock-AF_INET6 ->&obj_hash[i].lock ->elock-AF_INET6 ->pool_lock#2 ->&tcp_hashinfo.bhash[i].lock ->&sk->sk_lock.wq ->&c->lock ->&____s->seqcount ->&dccp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->(&req->rsk_timer) ->&base->lock ->&queue->rskq_lock ->tk_core.seq.seqcount ->hrtimer_bases.lock ->batched_entropy_u16.lock ->clock-AF_INET6 ->&zone->lock ->krc.lock ->key#23 ->batched_entropy_u32.lock 
->crngs.lock ->&n->list_lock ->&list->lock#18 ->sctp_assocs_id_lock ->&asoc->wait ->stock_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 55 BD: 132 ++--: clock-AF_INET6 ->&c->lock ->pool_lock#2 ->&n->list_lock ->rds_tcp_tc_list_lock ->&cp->cp_lock ->&rm->m_rs_lock ->&obj_hash[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->&list->lock#45 ->tk_core.seq.seqcount ->&sd->defer_lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&lruvec->lru_lock FD: 131 BD: 1 .+.+: kn->active#46 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 465 BD: 72 ++++: dev_addr_sem ->net_rwsem ->&tn->lock ->&sdata->sec_mtx ->fs_reclaim ->pool_lock#2 ->nl_table_lock ->rlock-AF_NETLINK ->nl_table_wait.lock ->&tbl->lock ->&pn->hash_lock ->&obj_hash[i].lock ->input_pool.lock ->&c->lock ->&____s->seqcount ->rcu_node_0 ->&rq->__lock ->&n->list_lock ->&br->lock ->team->team_lock_key#2 ->team->team_lock_key ->team->team_lock_key#4 ->batched_entropy_u8.lock ->kfence_freelist_lock ->team->team_lock_key#5 ->team->team_lock_key#6 ->team->team_lock_key#3 ->_xmit_ETHER ->&hard_iface->bat_iv.ogm_buff_mutex ->&cfs_rq->removed.lock ->remove_cache_srcu ->quarantine_lock ->&____s->seqcount#2 ->&rcu_state.expedited_wq FD: 920 BD: 2 +.+.: nlk_cb_mutex-GENERIC ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->rtnl_mutex ->&rdev->wiphy.mtx ->rlock-AF_NETLINK ->&obj_hash[i].lock ->&c->lock ->&devlink->lock_key ->&n->list_lock ->&devlink->lock_key#2 ->&devlink->lock_key#3 ->&devlink->lock_key#4 ->&devlink->lock_key#5 ->&devlink->lock_key#6 ->genl_mutex ->&____s->seqcount#2 ->&rq->__lock ->&lock->wait_lock ->genl_mutex.wait_lock ->&p->pi_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->rtnl_mutex.wait_lock ->&dir->lock#2 ->ovs_mutex ->&ht->lock ->remove_cache_srcu ->&cfs_rq->removed.lock FD: 21 BD: 87 +...: &rdev->bss_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&zone->lock ->&n->list_lock ->quarantine_lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 131 BD: 1 +.-.: (&net->sctp.addr_wq_timer) ->&net->sctp.addr_wq_lock FD: 13 BD: 72 ++..: lapb_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 72 ++.-: x25_neigh_list_lock FD: 1 BD: 72 +...: _xmit_SLIP FD: 14 BD: 1 +.-.: (&eql->timer) ->&eql->queue.lock ->&obj_hash[i].lock ->&base->lock FD: 4 BD: 135 +.-.: &eql->queue.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 +...: &vi->refill_lock FD: 69 BD: 3784 +.-.: _xmit_ETHER#2 ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 140 BD: 86 +.+.: &local->chanctx_mtx ->fs_reclaim ->&c->lock ->pool_lock#2 ->&data->mutex ->remove_cache_srcu ->&local->queue_stop_reason_lock ->&obj_hash[i].lock ->krc.lock ->rcu_node_0 ->&rq->__lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount ->&rdev->bss_lock ->&____s->seqcount#2 ->&n->list_lock FD: 1 BD: 87 +.+.: &data->mutex FD: 19 BD: 3758 +...: &local->filter_lock ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount ->&____s->seqcount#2 FD: 21 BD: 1 +.+.: (wq_completion)phy0 ->(work_completion)(&local->reconfig_filter) FD: 20 BD: 26 +.+.: (work_completion)(&local->reconfig_filter) ->&local->filter_lock FD: 100 BD: 77 +.-.: &dev->tx_global_lock ->_xmit_ETHER#2 ->&obj_hash[i].lock ->&base->lock ->_xmit_NETROM ->&qdisc_xmit_lock_key ->_xmit_LOOPBACK#2 ->&qdisc_xmit_lock_key#2 
->&vlan_netdev_xmit_lock_key ->&qdisc_xmit_lock_key#3 ->&batadv_netdev_xmit_lock_key ->&qdisc_xmit_lock_key#4 ->_xmit_NONE#2 ->_xmit_TUNNEL#2 ->_xmit_IPGRE#2 ->_xmit_TUNNEL6#2 ->_xmit_SIT#2 ->&qdisc_xmit_lock_key#5 FD: 21 BD: 3784 +.-.: &sch->q.lock ->batched_entropy_u32.lock ->tk_core.seq.seqcount ->pool_lock#2 ->hrtimer_bases.lock ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->&n->list_lock ->&____s->seqcount#2 ->&base->lock ->&q->current_entry_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock FD: 1 BD: 77 ....: class FD: 1 BD: 77 ....: (&tbl->proxy_timer) FD: 21 BD: 1 +.+.: (wq_completion)phy1 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 72 +...: _xmit_VOID FD: 1 BD: 72 +...: _xmit_X25 FD: 4 BD: 73 +...: &lapbeth->up_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 73 BD: 73 +.-.: &lapb->lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->&n->list_lock ->&list->lock#24 ->&list->lock#25 ->&____s->seqcount#2 FD: 3 BD: 166 +.+.: &(ei->i_block_reservation_lock) ->key#14 ->key#3 FD: 904 BD: 2 +.+.: (work_completion)(&work->work) ->devices_rwsem ->&obj_hash[i].lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->quarantine_lock FD: 871 BD: 2 +.+.: (work_completion)(&(&ifa->dad_work)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3882 +.-.: rt6_exception_lock FD: 1 BD: 93 ....: &____s->seqcount#8 FD: 12 BD: 3786 +.-.: &ul->lock ->pool_lock#2 ->&dir->lock#2 ->&c->lock FD: 1 BD: 157 ....: &tty->ctrl.lock FD: 6 BD: 82 +.+.: fasync_lock ->&new->fa_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 1 +.+.: &buf->lock FD: 1 BD: 7 ....: &tty->flow.lock FD: 81 BD: 132 +.-.: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock ->_xmit_ETHER#2 ->_xmit_SLIP#2 ->_xmit_NETROM ->&obj_hash[i].lock ->pool_lock#2 ->&sch->q.lock ->&meta->lock ->kfence_freelist_lock FD: 1 BD: 6 +.+.: &net->packet.sklist_lock FD: 239 BD: 3 +.+.: sk_lock-AF_PACKET ->slock-AF_PACKET ->&po->bind_lock ->&obj_hash[i].lock ->&rnp->exp_wq[3] ->&rq->__lock ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->&c->lock ->pcpu_alloc_mutex ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->&rnp->exp_wq[1] ->&n->list_lock ->&rnp->exp_wq[2] ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->init_mm.page_table_lock ->&po->pg_vec_lock ->stock_lock FD: 1 BD: 4 +...: slock-AF_PACKET FD: 17 BD: 5 +.+.: &po->bind_lock ->ptype_lock ->pool_lock#2 ->&dir->lock#2 ->&match->lock FD: 1 BD: 3801 +.-.: rlock-AF_PACKET FD: 1 BD: 3 +...: wlock-AF_PACKET FD: 260 BD: 4 +.+.: &ldata->atomic_read_lock ->&tty->termios_rwsem ->(work_completion)(&buf->work) ->&rq->__lock FD: 1 BD: 5 +.+.: (work_completion)(&buf->work) FD: 31 BD: 1 ..-.: &(&idev->mc_dad_work)->timer FD: 214 BD: 1 +.+.: (wq_completion)mld ->(work_completion)(&(&idev->mc_dad_work)->work) ->(work_completion)(&(&idev->mc_ifc_work)->work) ->(work_completion)(&(&idev->mc_report_work)->work) ->&rq->__lock FD: 210 BD: 2 +.+.: (work_completion)(&(&idev->mc_dad_work)->work) ->&idev->mc_lock FD: 2 BD: 4447 ..-.: &pl->lock ->key#12 FD: 31 BD: 1 ..-.: &(&idev->mc_ifc_work)->timer FD: 210 BD: 2 +.+.: (work_completion)(&(&idev->mc_ifc_work)->work) ->&idev->mc_lock ->&rq->__lock FD: 12 BD: 3772 +.-.: &ul->lock#2 ->pool_lock#2 ->&dir->lock#2 ->&c->lock 
->batched_entropy_u8.lock ->kfence_freelist_lock FD: 22 BD: 3787 ++--: &n->lock ->&obj_hash[i].lock ->&base->lock ->&c->lock ->pool_lock#2 ->&(&n->ha_lock)->lock ->&____s->seqcount#9 ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&dir->lock ->stock_lock ->quarantine_lock ->&meta->lock ->&(&n->hh.hh_lock)->lock FD: 1 BD: 3794 +.--: &____s->seqcount#9 FD: 40 BD: 2 +.+.: (work_completion)(&w->work)#2 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->nf_conntrack_mutex ->&rq->__lock ->&meta->lock ->kfence_freelist_lock ->nf_conntrack_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 3799 +..-: &____s->seqcount#10 FD: 133 BD: 3 +.+.: fanout_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&po->bind_lock ->&c->lock ->&n->list_lock ->remove_cache_srcu ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 3 +...: clock-AF_PACKET FD: 1 BD: 3 ....: elock-AF_PACKET FD: 31 BD: 1 ..-.: &(&ifa->dad_work)->timer FD: 40 BD: 123 +.-.: &ct->lock ->(console_sem).lock FD: 101 BD: 1 +.-.: (&dev->watchdog_timer) ->&dev->tx_global_lock FD: 74 BD: 1 +.-.: (&lapb->t1timer) ->&lapb->lock FD: 31 BD: 1 ..-.: &(&dm_bufio_cleanup_old_work)->timer FD: 15 BD: 1 +.+.: (wq_completion)dm_bufio_cache ->(work_completion)(&(&dm_bufio_cleanup_old_work)->work) FD: 14 BD: 2 +.+.: (work_completion)(&(&dm_bufio_cleanup_old_work)->work) ->dm_bufio_clients_lock ->&obj_hash[i].lock ->&base->lock FD: 5 BD: 3773 +.-.: &nf_conntrack_locks[i] ->&nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 4 BD: 3774 +.-.: &nf_conntrack_locks[i]/1 ->batched_entropy_u8.lock FD: 1 BD: 128 +.-.: &hashinfo->ehash_locks[i] FD: 2 BD: 3788 +.-.: &(&n->ha_lock)->lock ->&____s->seqcount#9 FD: 1 BD: 3787 +.-.: lock#8 FD: 1 BD: 3789 ..-.: id_table_lock FD: 2 BD: 115 +.-.: (&req->rsk_timer) ->&hashinfo->ehash_locks[i] FD: 1 BD: 115 +.-.: &queue->rskq_lock FD: 19 BD: 103 +.-.: tcp_metrics_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount FD: 94 BD: 80 +.-.: slock-AF_INET/1 ->tk_core.seq.seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&base->lock ->&meta->lock ->kfence_freelist_lock ->&n->list_lock ->&____s->seqcount ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->elock-AF_INET ->quarantine_lock ->batched_entropy_u8.lock ->&zone->lock ->&sctp_ep_hashtable[i].lock ->krc.lock ->&sctp_port_hashtable[i].lock ->clock-AF_INET ->&____s->seqcount#2 ->&f->f_owner.lock ->key#26 FD: 1 BD: 110 +.-.: &sd->defer_lock FD: 133 BD: 1 +.-.: (&icsk->icsk_delack_timer) ->slock-AF_INET ->k-slock-AF_INET6 ->k-slock-AF_INET ->slock-AF_INET6 FD: 140 BD: 1 +.-.: (&icsk->icsk_retransmit_timer) ->slock-AF_INET ->slock-AF_INET6 ->&obj_hash[i].lock ->pool_lock#2 ->&dir->lock ->stock_lock ->k-slock-AF_INET6 ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->k-slock-AF_INET FD: 1 BD: 103 ..-.: elock-AF_INET FD: 1 BD: 170 ....: key#14 FD: 94 BD: 168 +.+.: &lg->lg_mutex ->&ei->i_prealloc_lock ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&mapping->private_lock ->&ret->b_state_lock ->&journal->j_revoke_lock ->&pa->pa_lock ->&lg->lg_prealloc_lock ->&rq->__lock ->key#3 ->stock_lock ->&xa->xa_lock#8 ->lock#4 ->tk_core.seq.seqcount ->&dd->lock ->bit_wait_table + i ->&obj_hash[i].lock FD: 1 BD: 170 +.+.: &pa->pa_lock FD: 1 BD: 170 +.+.: &lg->lg_prealloc_lock FD: 29 BD: 165 ..-.: &rq_wait->wait ->&p->pi_lock FD: 33 BD: 3 ..-.: &ei->i_completed_io_lock FD: 172 BD: 1 +.+.: (wq_completion)ext4-rsv-conversion 
->(work_completion)(&ei->i_rsv_conversion_work) ->&rq->__lock FD: 171 BD: 2 +.+.: (work_completion)(&ei->i_rsv_conversion_work) ->&ei->i_completed_io_lock ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->pool_lock#2 ->&ext4__ioend_wq[i] ->&ret->b_uptodate_lock ->&folio_wait_table[i] ->rcu_node_0 ->&rq->__lock ->quarantine_lock ->mmu_notifier_invalidate_range_start ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rcu_state.expedited_wq ->remove_cache_srcu ->&meta->lock ->kfence_freelist_lock ->&n->list_lock ->&cfs_rq->removed.lock FD: 29 BD: 169 ....: &journal->j_wait_reserved ->&p->pi_lock FD: 1 BD: 15 ....: &ext4__ioend_wq[i] FD: 1 BD: 4418 +.+.: &pa->pa_lock#2 FD: 1 BD: 4420 ....: key#15 FD: 31 BD: 1 ..-.: drivers/regulator/core.c:6262 FD: 4 BD: 2 +.+.: (regulator_init_complete_work).work ->&k->list_lock ->&k->k_lock FD: 86 BD: 165 +.+.: &sbi->s_orphan_lock ->&ret->b_state_lock ->&rq->__lock ->bit_wait_table + i ->&ei->i_raw_lock ->&lock->wait_lock ->rcu_node_0 ->mmu_notifier_invalidate_range_start ->&____s->seqcount ->pool_lock#2 ->&mapping->private_lock ->&obj_hash[i].lock ->pgd_lock ->stock_lock ->key ->pcpu_lock ->percpu_counters_lock ->&cfs_rq->removed.lock FD: 131 BD: 1 .+.+: kn->active#47 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] FD: 1 BD: 1 +.+.: &futex_queues[i].lock FD: 1 BD: 4 ....: &on->poll FD: 1 BD: 4 +.+.: module_mutex FD: 3 BD: 77 +.+.: once_mutex ->crngs.lock FD: 214 BD: 1 .+.+: sb_writers#9 ->&attr->mutex ->&mm->mmap_lock FD: 213 BD: 2 +.+.: &attr->mutex ->&mm->mmap_lock FD: 144 BD: 1 +.+.: &type->s_umount_key#41/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#30 ->&root->kernfs_supers_rwsem ->&dentry->d_lock FD: 42 BD: 4418 +.+.: &sb->s_type->i_lock_key#30 ->&dentry->d_lock FD: 915 BD: 2 .+.+: sb_writers#10 ->mount_lock ->&type->i_mutex_dir_key#6 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#6/1 ->&c->lock ->&____s->seqcount ->pool_lock#2 FD: 103 BD: 3 ++++: &type->i_mutex_dir_key#6 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#30 ->&c->lock ->&rq->__lock ->&xa->xa_lock#4 ->&obj_hash[i].lock ->stock_lock ->pool_lock#2 ->&n->list_lock FD: 131 BD: 1 ++++: kn->active#48 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->pool_lock#2 FD: 1 BD: 1 +.+.: &sb->s_type->i_mutex_key#15 FD: 144 BD: 1 +.+.: &type->s_umount_key#42/1 ->fs_reclaim ->pool_lock#2 ->pcpu_alloc_mutex ->shrinker_rwsem ->&c->lock ->list_lrus_mutex ->sb_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&root->kernfs_supers_rwsem ->&dentry->d_lock ->&n->list_lock ->&rq->__lock FD: 42 BD: 4430 +.+.: &sb->s_type->i_lock_key#31 ->&dentry->d_lock FD: 130 BD: 1 ++++: &type->s_umount_key#43 ->shrinker_rwsem ->percpu_ref_switch_lock ->&root->kernfs_supers_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#31 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock ->&rq->__lock ->&lru->node[i].lock FD: 880 BD: 2 +.+.: (work_completion)(&cgrp->bpf.release_work) ->cgroup_mutex ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 886 BD: 1 +.+.: (wq_completion)cgroup_destroy ->(work_completion)(&css->destroy_work) ->(work_completion)(&(&css->destroy_rwork)->work) FD: 880 BD: 2 +.+.: (work_completion)(&css->destroy_work) 
->cgroup_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 884 BD: 2 +.+.: (work_completion)(&(&css->destroy_rwork)->work) ->percpu_ref_switch_lock ->&obj_hash[i].lock ->pool_lock#2 ->&cgrp->pidlist_mutex ->(wq_completion)cgroup_pidlist_destroy ->&wq->mutex ->(work_completion)(&cgrp->release_agent_work) ->cgroup_mutex ->cgroup_rstat_lock ->pcpu_lock ->&root->kernfs_rwsem ->kernfs_idr_lock FD: 132 BD: 12 +.+.: &cgrp->pidlist_mutex ->css_set_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 134 BD: 3 +.+.: (wq_completion)cgroup_pidlist_destroy ->(work_completion)(&(&l->destroy_dwork)->work) FD: 1 BD: 3 +.+.: (work_completion)(&cgrp->release_agent_work) FD: 926 BD: 2 .+.+: sb_writers#11 ->mount_lock ->&type->i_mutex_dir_key#7 ->fs_reclaim ->&mm->mmap_lock ->&of->mutex ->&obj_hash[i].lock ->&type->i_mutex_dir_key#7/1 ->&sem->wait_lock ->&p->pi_lock ->stock_lock ->&c->lock ->&n->list_lock ->&rq->__lock ->pool_lock#2 ->&____s->seqcount ->&p->lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#31 ->&wb->list_lock ->&____s->seqcount#2 ->rcu_node_0 ->&root->kernfs_iattr_rwsem ->&dentry->d_lock ->tomoyo_ss ->&sb->s_type->i_mutex_key#16 ->iattr_mutex ->&xattrs->lock FD: 107 BD: 3 ++++: &type->i_mutex_dir_key#7 ->tomoyo_ss ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->&sb->s_type->i_lock_key#31 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->pool_lock#2 ->&xa->xa_lock#4 ->&obj_hash[i].lock ->stock_lock ->&sem->wait_lock ->&rq->__lock ->remove_cache_srcu FD: 1 BD: 16 +.+.: &dom->lock FD: 131 BD: 1 .+.+: kn->active#49 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 130 BD: 3 +.+.: &sb->s_type->i_mutex_key#16 ->tk_core.seq.seqcount ->&root->kernfs_iattr_rwsem FD: 286 BD: 1 .+.+: kn->active#50 ->fs_reclaim ->&c->lock ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock FD: 44 BD: 3 +.+.: &type->s_umount_key#44 ->sb_lock ->&dentry->d_lock FD: 150 BD: 2 +.+.: &sb->s_type->i_mutex_key#17 ->namespace_sem ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#26 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->pin_fs_lock ->sb_lock ->&type->s_umount_key#44 ->mnt_id_ida.xa_lock ->pcpu_alloc_mutex ->mount_lock ->&obj_hash[i].lock ->entries_lock FD: 228 BD: 1 .+.+: sb_writers#12 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&mm->mmap_lock ->&sb->s_type->i_mutex_key#17 FD: 1 BD: 77 ++..: &pn->hash_lock FD: 55 BD: 1 +...: &net->ipv6.fib6_gc_lock ->&obj_hash[i].lock FD: 1 BD: 72 +...: _xmit_IEEE802154 FD: 128 BD: 9 +.+.: swap_cgroup_mutex ->fs_reclaim ->&____s->seqcount ->pool_lock#2 FD: 133 BD: 9 +.+.: swapon_mutex ->fs_reclaim ->pool_lock#2 ->swap_lock ->percpu_ref_switch_lock ->(console_sem).lock FD: 2 BD: 12 +.+.: &p->lock#2 ->swap_avail_lock FD: 1 BD: 13 +.+.: swap_avail_lock FD: 1 BD: 9 ....: proc_poll_wait.lock FD: 287 BD: 1 +.+.: swap_slots_cache_enable_mutex ->cpu_hotplug_lock ->swap_lock FD: 1 BD: 146 +.+.: swap_slots_cache_mutex FD: 32 BD: 1 +.-.: (&timer) ->&obj_hash[i].lock ->&base->lock ->&txlock ->&txwq FD: 1 BD: 3857 ..-.: &list->lock#5 FD: 7 BD: 133 +...: _xmit_SLIP#2 ->&eql->queue.lock FD: 45 BD: 139 +...: _xmit_NETROM ->(console_sem).lock ->console_owner_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rdev->wpan_phy.queue_lock ->&rdev->wpan_phy.sync_txq FD: 7 BD: 1 +...: _xmit_X25#2 ->&lapbeth->up_lock FD: 31 BD: 1 ..-.: net/wireless/reg.c:236 FD: 871 BD: 2 +.+.: (reg_check_chans).work 
->rtnl_mutex FD: 31 BD: 1 ..-.: net/wireless/reg.c:533 FD: 871 BD: 2 +.+.: (crda_timeout).work ->rtnl_mutex FD: 82 BD: 1 +.-.: (&n->timer) ->&n->lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->icmp_global.lock ->&dir->lock#2 ->nl_table_lock ->nl_table_wait.lock ->&ul->lock#2 ->stock_lock ->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&dir->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 122 BD: 1 +.-.: (&sk->sk_timer) ->slock-AF_INET ->k-slock-AF_INET6 FD: 1 BD: 220 ....: &newf->resize_wait FD: 3 BD: 153 ..-.: &kcov->lock ->kcov_remote_lock FD: 158 BD: 1 +.+.: pid_caches_mutex ->slab_mutex FD: 44 BD: 1 +.+.: &type->s_umount_key#45 ->sb_lock ->&dentry->d_lock FD: 144 BD: 2 ++++: &sb->s_type->i_mutex_key#18 ->namespace_sem ->&dentry->d_lock ->tk_core.seq.seqcount ->&rq->__lock FD: 7 BD: 25 ++++: hci_sk_list.lock ->pool_lock#2 ->tk_core.seq.seqcount ->rlock-AF_BLUETOOTH FD: 1 BD: 1 +.+.: (work_completion)(&(&data->open_timeout)->work) FD: 310 BD: 1 +.+.: &data->open_mutex ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->&x->wait#9 ->hci_index_ida.xa_lock ->cpu_hotplug_lock ->wq_pool_mutex ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#81 ->&c->lock ->&dev->devres_lock ->triggers_list_lock ->leds_list_lock ->rfkill_global_mutex ->rfkill_global_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&rfkill->lock ->hci_dev_list_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->(pm_chain_head).rwsem ->&list->lock#6 ->&data->read_wait ->&n->list_lock FD: 1 BD: 2 ....: hci_index_ida.xa_lock FD: 3 BD: 23 +.+.: subsys mutex#81 ->&k->k_lock FD: 13 BD: 24 ++++: hci_dev_list_lock ->pool_lock#2 ->tk_core.seq.seqcount ->rlock-AF_BLUETOOTH ->&c->lock ->&n->list_lock FD: 303 BD: 1 +.+.: (wq_completion)hci0 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 301 BD: 7 +.+.: (work_completion)(&hdev->power_on) ->&hdev->req_lock ->fs_reclaim ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount FD: 1 BD: 19 ....: &list->lock#6 FD: 300 BD: 9 +.+.: &hdev->req_lock ->&obj_hash[i].lock ->&list->lock#7 ->pool_lock#2 ->&list->lock#8 ->&hdev->req_wait_q ->&base->lock ->&rq->__lock ->(&timer.timer) ->&c->lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&n->list_lock ->(console_sem).lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&hdev->lock ->&wq->mutex ->(wq_completion)hci0#2 ->&list->lock#6 ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->(work_completion)(&(&hdev->interleave_scan)->work) ->hci_dev_list_lock ->(work_completion)(&hdev->tx_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&(&hdev->rpa_expired)->work) ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->&lock->wait_lock FD: 29 BD: 19 ....: &data->read_wait ->&p->pi_lock FD: 1 BD: 25 ....: &list->lock#7 FD: 1 BD: 10 ....: &list->lock#8 FD: 29 BD: 17 ....: &hdev->req_wait_q ->&p->pi_lock FD: 290 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_HCI ->slock-AF_BLUETOOTH-BTPROTO_HCI ->sock_cookie_ida.xa_lock ->&p->alloc_lock ->pool_lock#2 ->tk_core.seq.seqcount ->hci_sk_list.lock ->&obj_hash[i].lock ->clock-AF_BLUETOOTH ->&rq->__lock ->&c->lock 
->&____s->seqcount ->mgmt_chan_list_lock ->mgmt_chan_list_lock.wait_lock ->&p->pi_lock ->&n->list_lock ->&____s->seqcount#2 ->&mm->mmap_lock ->hci_dev_list_lock ->&hdev->lock ->rlock-AF_BLUETOOTH ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->(console_sem).lock ->rcu_node_0 ->quarantine_lock ->&list->lock#7 FD: 30 BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_HCI ->&sk->sk_lock.wq#2 FD: 1 BD: 4 ....: sock_cookie_ida.xa_lock FD: 294 BD: 10 +.+.: (wq_completion)hci0#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 137 BD: 16 +.+.: (work_completion)(&hdev->cmd_work) ->&list->lock#7 ->fs_reclaim ->&c->lock ->pool_lock#2 ->tk_core.seq.seqcount ->&list->lock#6 ->&data->read_wait ->&obj_hash[i].lock ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&n->list_lock ->&rq->__lock ->remove_cache_srcu ->hci_sk_list.lock ->hci_dev_list_lock ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->&dir->lock FD: 287 BD: 16 +.+.: (work_completion)(&hdev->rx_work) ->&list->lock#7 ->lock#6 ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&hdev->lock ->(console_sem).lock ->console_owner_lock ->console_owner ->&rq->__lock ->&obj_hash[i].lock ->&hdev->req_wait_q ->&base->lock ->&c->lock ->chan_list_lock FD: 284 BD: 21 +.+.: &hdev->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&x->wait#9 ->&k->list_lock ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&c->lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->&k->k_lock ->subsys mutex#81 ->&list->lock#7 ->&hdev->unregister_lock ->hci_cb_list_lock ->&base->lock ->tk_core.seq.seqcount ->hci_sk_list.lock ->&____s->seqcount ->&n->list_lock ->remove_cache_srcu ->rcu_node_0 ->&rq->__lock ->sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->&conn->chan_lock ->rlock-AF_BLUETOOTH ->hci_dev_list_lock ->wlock-AF_BLUETOOTH ->&dir->lock ->(work_completion)(&(&conn->disc_work)->work) ->(work_completion)(&(&conn->auto_accept_work)->work) ->(work_completion)(&(&conn->idle_work)->work) ->&list->lock#9 ->dev_pm_qos_sysfs_mtx ->kernfs_idr_lock ->deferred_probe_mutex ->device_links_lock ->mmu_notifier_invalidate_range_start FD: 303 BD: 1 +.+.: (wq_completion)hci1 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 294 BD: 1 +.+.: (wq_completion)hci1#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 303 BD: 1 +.+.: (wq_completion)hci2 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 294 BD: 1 +.+.: (wq_completion)hci2#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 129 BD: 22 +.+.: &hdev->unregister_lock ->fs_reclaim ->pool_lock#2 ->&hdev->cmd_sync_work_lock ->&c->lock ->&rq->__lock FD: 1 BD: 23 +.+.: &hdev->cmd_sync_work_lock FD: 301 BD: 7 +.+.: (work_completion)(&hdev->cmd_sync_work) ->&hdev->cmd_sync_work_lock ->&hdev->req_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 
1 BD: 27 +.+.: &conn->ident_lock FD: 1 BD: 28 ....: &list->lock#9 FD: 138 BD: 25 +.+.: &conn->chan_lock ->&rq->__lock ->&chan->lock/1 FD: 303 BD: 1 +.+.: (wq_completion)hci3 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 294 BD: 1 +.+.: (wq_completion)hci3#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 33 BD: 16 +.+.: (work_completion)(&hdev->tx_work) ->&list->lock#9 ->tk_core.seq.seqcount ->&list->lock#6 ->&data->read_wait ->&list->lock#7 FD: 2 BD: 16 +.+.: (work_completion)(&conn->pending_rx_work) ->&list->lock#10 FD: 1 BD: 24 ....: &list->lock#10 FD: 1 BD: 27 +...: clock-AF_BLUETOOTH FD: 1 BD: 32 ....: rlock-AF_BLUETOOTH FD: 1 BD: 28 ....: wlock-AF_BLUETOOTH FD: 28 BD: 1 +.+.: &sb->s_type->i_mutex_key#19 ->&rq->__lock FD: 1 BD: 23 +.+.: hci_cb_list_lock.wait_lock FD: 1 BD: 1 +.+.: &undo_list->lock FD: 303 BD: 1 +.+.: (wq_completion)hci4 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 294 BD: 1 +.+.: (wq_completion)hci4#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 303 BD: 1 +.+.: (wq_completion)hci5 ->(work_completion)(&hdev->power_on) ->(work_completion)(&hdev->cmd_sync_work) FD: 294 BD: 1 +.+.: (wq_completion)hci5#2 ->(work_completion)(&hdev->cmd_work) ->(work_completion)(&hdev->rx_work) ->(work_completion)(&hdev->tx_work) ->(work_completion)(&conn->pending_rx_work) ->(work_completion)(&(&hdev->cmd_timer)->work) ->(work_completion)(&(&conn->disc_work)->work) FD: 1 BD: 72 +...: &nr_netdev_addr_lock_key FD: 1 BD: 72 +...: listen_lock FD: 7 BD: 13 +.+.: rdma_nets.xa_lock ->&c->lock ->pool_lock#2 FD: 1 BD: 228 +.+.: uevent_sock_mutex.wait_lock FD: 1 BD: 4 +.+.: &____s->seqcount#11 FD: 2 BD: 3 +.+.: &(&net->ipv4.ping_group_range.lock)->lock ->&____s->seqcount#11 FD: 5 BD: 72 +.+.: &r->consumer_lock ->&r->producer_lock FD: 4 BD: 3775 +.-.: &r->producer_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 31 BD: 3717 +...: &bridge_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&n->list_lock ->&obj_hash[i].lock ->krc.lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&zone->lock ->&pgdat->kswapd_wait FD: 42 BD: 78 +.-.: &br->hash_lock ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&n->list_lock ->&____s->seqcount#2 ->rlock-AF_NETLINK ->batched_entropy_u8.lock ->kfence_freelist_lock ->quarantine_lock ->&zone->lock FD: 130 BD: 73 +.+.: j1939_netdev_lock ->fs_reclaim ->pool_lock#2 ->&net->can.rcvlists_lock ->&obj_hash[i].lock ->&priv->lock FD: 19 BD: 3716 +...: &dev_addr_list_lock_key#2 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->krc.lock FD: 8 BD: 72 +...: &bat_priv->tvlv.handler_list_lock ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 13 BD: 79 +...: &bat_priv->tvlv.container_list_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&____s->seqcount ->quarantine_lock FD: 19 BD: 3717 +...: &batadv_netdev_addr_lock_key ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock FD: 10 BD: 86 +...: &bat_priv->softif_vlan_list_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount#2 
->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 22 BD: 85 +...: key#16 ->&bat_priv->softif_vlan_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&____s->seqcount ->&bat_priv->tt.changes_list_lock FD: 6 BD: 86 +...: &bat_priv->tt.changes_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock FD: 33 BD: 1 ..-.: &(&bat_priv->nc.work)->timer FD: 81 BD: 1 +.+.: (wq_completion)bat_events ->(work_completion)(&(&bat_priv->nc.work)->work) ->(work_completion)(&(&bat_priv->mcast.work)->work) ->(work_completion)(&(&bat_priv->orig_work)->work) ->(work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->(work_completion)(&(&bat_priv->tt.work)->work) ->(work_completion)(&(&bat_priv->dat.work)->work) ->(work_completion)(&(&bat_priv->bla.work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 34 BD: 2 +.+.: (work_completion)(&(&bat_priv->nc.work)->work) ->key#17 ->key#18 ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 3 +...: key#17 FD: 1 BD: 3 +...: key#18 FD: 158 BD: 73 +.+.: init_lock ->slab_mutex ->fs_reclaim ->&zone->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->crngs.lock FD: 1 BD: 3728 +.-.: deferred_lock FD: 871 BD: 2 +.+.: deferred_process_work ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 72 ....: target_list_lock FD: 57 BD: 75 +.-.: &br->lock ->&br->hash_lock ->lweventlist_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->(console_sem).lock ->&____s->seqcount ->nl_table_lock ->nl_table_wait.lock ->&br->multicast_lock ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 3690 +.+.: &bond->stats_lock/1 FD: 152 BD: 1 +.+.: (wq_completion)bond0 ->(work_completion)(&(&slave->notify_work)->work) FD: 151 BD: 3768 +.+.: (work_completion)(&(&slave->notify_work)->work) ->&obj_hash[i].lock ->&base->lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->pool_lock#2 ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 33 BD: 1 ..-.: &(&slave->notify_work)->timer FD: 33 BD: 1 ..-.: &(&bat_priv->mcast.work)->timer FD: 51 BD: 2 +.+.: (work_completion)(&(&bat_priv->mcast.work)->work) ->pool_lock#2 ->&bat_priv->mcast.mla_lock ->&obj_hash[i].lock ->&base->lock ->kfence_freelist_lock ->&meta->lock ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->quarantine_lock FD: 47 BD: 3 +.+.: &bat_priv->mcast.mla_lock ->pool_lock#2 ->key#16 ->&bat_priv->tt.changes_list_lock ->&bat_priv->tvlv.container_list_lock ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->(console_sem).lock ->console_owner_lock FD: 182 BD: 73 +.+.: team->team_lock_key ->fs_reclaim ->pool_lock#2 ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&ndev->lock ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&n->list_lock ->lweventlist_lock ->(console_sem).lock FD: 152 BD: 1 +.+.: (wq_completion)bond0#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond0#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond0#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 42 BD: 75 +.+.: 
&hard_iface->bat_iv.ogm_buff_mutex ->crngs.lock ->pool_lock#2 ->batched_entropy_u8.lock ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&c->lock ->rcu_node_0 ->&rq->__lock ->&____s->seqcount ->&rcu_state.expedited_wq ->&bat_priv->tt.commit_lock ->&bat_priv->tvlv.container_list_lock ->&n->list_lock ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->kfence_freelist_lock FD: 33 BD: 1 ..-.: &(&bat_priv->orig_work)->timer FD: 32 BD: 2 +.+.: (work_completion)(&(&bat_priv->orig_work)->work) ->key#19 ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock ->pool_lock#2 ->&cfs_rq->removed.lock FD: 1 BD: 3 +...: key#19 FD: 13 BD: 76 +...: &bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->&base->lock FD: 152 BD: 1 +.+.: (wq_completion)bond0#5 ->(work_completion)(&(&slave->notify_work)->work) FD: 31 BD: 1 ..-.: drivers/net/wireguard/ratelimiter.c:20 FD: 32 BD: 2 +.+.: (gc_work).work ->tk_core.seq.seqcount ->"ratelimiter_table_lock" ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->pool_lock#2 FD: 1 BD: 3 +.+.: "ratelimiter_table_lock" FD: 6 BD: 72 +...: _xmit_NONE ->&c->lock FD: 1 BD: 72 +...: lock#9 FD: 182 BD: 73 +.+.: team->team_lock_key#2 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->lweventlist_lock ->(console_sem).lock ->pool_lock#2 ->&n->list_lock FD: 447 BD: 73 +.+.: team->team_lock_key#3 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&c->lock ->&ndev->lock ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->&rq->__lock ->pool_lock#2 ->&n->list_lock ->quarantine_lock ->rlock-AF_NETLINK ->&vlan_netdev_addr_lock_key/1 ->&macvlan_netdev_addr_lock_key/2 ->pcpu_alloc_mutex ->&idev->mc_lock ->&tb->tb6_lock ->(inet6addr_validator_chain).rwsem ->stock_lock ->&net->ipv6.addrconf_hash_lock ->pcpu_lock ->&ifa->lock ->&pn->hash_lock ->&dev->tx_global_lock ->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->&sch->q.lock ->__ip_vs_mutex ->krc.lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->flowtable_lock ->&dir->lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 FD: 152 BD: 1 +.+.: (wq_completion)bond0#6 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 81 +..-: &____s->seqcount#12 FD: 447 BD: 73 +.+.: team->team_lock_key#4 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&c->lock ->&____s->seqcount ->&n->list_lock ->lweventlist_lock ->(console_sem).lock ->pool_lock#2 ->&rq->__lock ->&vlan_netdev_addr_lock_key/1 ->&____s->seqcount#2 ->&macvlan_netdev_addr_lock_key/2 ->pcpu_alloc_mutex ->&idev->mc_lock ->&tb->tb6_lock ->(inet6addr_validator_chain).rwsem ->stock_lock ->&net->ipv6.addrconf_hash_lock ->pcpu_lock ->&ifa->lock ->&pn->hash_lock ->&dev->tx_global_lock 
->dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->&sch->q.lock ->quarantine_lock ->__ip_vs_mutex ->krc.lock ->&tbl->lock ->class ->(&tbl->proxy_timer) ->&base->lock ->flowtable_lock ->&dir->lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) FD: 446 BD: 73 +.+.: team->team_lock_key#5 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->&c->lock ->input_pool.lock ->&____s->seqcount ->pool_lock#2 ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->lweventlist_lock ->(console_sem).lock ->quarantine_lock ->&rq->__lock ->&n->list_lock ->krc.lock ->rcu_node_0 ->&dev_addr_list_lock_key#2/1 ->&pn->hash_lock ->&dev->tx_global_lock ->&sch->q.lock ->&base->lock ->__ip_vs_mutex ->&tbl->lock ->class ->(&tbl->proxy_timer) ->flowtable_lock ->&dir->lock ->&tb->tb6_lock ->&ul->lock#2 ->&net->ipv6.addrconf_hash_lock ->&ifa->lock ->&idev->mc_lock ->&idev->mc_query_lock ->(work_completion)(&(&idev->mc_report_work)->work) ->pcpu_alloc_mutex ->(inet6addr_validator_chain).rwsem ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->pcpu_lock FD: 182 BD: 73 +.+.: team->team_lock_key#6 ->fs_reclaim ->netpoll_srcu ->net_rwsem ->&tn->lock ->_xmit_ETHER ->&dir->lock#2 ->input_pool.lock ->&ndev->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&in_dev->mc_tomb_lock ->&im->lock ->cbs_list_lock ->sysfs_symlink_target_lock ->lock ->&root->kernfs_rwsem ->&____s->seqcount ->lweventlist_lock ->(console_sem).lock ->&rq->__lock ->&n->list_lock FD: 33 BD: 1 ..-.: &(&hdev->cmd_timer)->timer FD: 1 BD: 3720 +.-.: &hsr->list_lock FD: 44 BD: 16 +.+.: (work_completion)(&(&hdev->cmd_timer)->work) ->(console_sem).lock ->console_owner_lock ->console_owner ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 10 BD: 3716 +...: &vlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 33 BD: 1 ..-.: &(&forw_packet_aggr->delayed_work)->timer FD: 51 BD: 2 +.+.: (work_completion)(&(&forw_packet_aggr->delayed_work)->work) ->&hard_iface->bat_iv.ogm_buff_mutex ->&bat_priv->forw_bat_list_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->&cfs_rq->removed.lock ->&meta->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&base->lock FD: 21 BD: 72 +.-.: (&app->join_timer) ->&app->lock ->&list->lock#11 FD: 16 BD: 74 +.-.: &app->lock ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&list->lock#11 ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->init_task.mems_allowed_seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 75 ..-.: &list->lock#11 FD: 19 BD: 72 +.-.: (&app->join_timer)#2 ->&app->lock#2 ->&list->lock#12 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->&base->lock FD: 8 BD: 73 +.-.: &app->lock#2 ->pool_lock#2 ->&list->lock#12 ->&c->lock FD: 1 BD: 74 ..-.: &list->lock#12 FD: 8 BD: 3716 +...: &macvlan_netdev_addr_lock_key ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 8 BD: 3716 +...: &dev_addr_list_lock_key#3 ->pool_lock#2 ->&c->lock ->&____s->seqcount FD: 1 BD: 72 ....: &xa->xa_lock#14 FD: 1 BD: 3716 +...: &dev_addr_list_lock_key#3/1 FD: 2 BD: 72 +.+.: &tap_major->minor_lock ->pool_lock#2 FD: 3 BD: 72 +.+.: subsys mutex#82 ->&k->k_lock FD: 897 BD: 1 .+.+: kn->active#51 
->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->&c->lock ->&____s->seqcount ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock FD: 895 BD: 9 +.+.: nsim_bus_dev_list_lock ->fs_reclaim ->pool_lock#2 ->nsim_bus_dev_ids.xa_lock ->&x->wait#9 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&k->list_lock ->lock ->&root->kernfs_rwsem ->&sem->wait_lock ->&p->pi_lock ->bus_type_sem ->sysfs_symlink_target_lock ->&k->k_lock ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->uevent_sock_mutex.wait_lock ->&rq->__lock ->nsim_bus_dev_list_lock.wait_lock ->subsys mutex#83 FD: 897 BD: 1 .+.+: kn->active#52 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->nsim_bus_dev_list_lock ->nsim_bus_dev_list_lock.wait_lock ->&p->pi_lock ->&rq->__lock ->&c->lock FD: 1 BD: 10 ....: nsim_bus_dev_ids.xa_lock FD: 2 BD: 18 +.+.: devlinks.xa_lock ->pool_lock#2 FD: 876 BD: 12 +.+.: &devlink->lock_key ->crngs.lock ->fs_reclaim ->pool_lock#2 ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&xa->xa_lock#15 ->&n->list_lock ->pcpu_alloc_mutex ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&cfs_rq->removed.lock FD: 9 BD: 18 +.+.: &xa->xa_lock#15 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 10 +.+.: nsim_bus_dev_list_lock.wait_lock FD: 1 BD: 3883 +...: &data->fib_event_queue_lock FD: 1 BD: 18 ....: &(&fn_net->fib_chain)->lock FD: 136 BD: 14 +.+.: (work_completion)(&data->fib_event_work) ->&data->fib_event_queue_lock ->&data->fib_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 134 BD: 15 +.+.: &data->fib_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&pool->lock ->&rq->__lock ->(&timer.timer) ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->pool_lock ->remove_cache_srcu ->&____s->seqcount#2 ->&cfs_rq->removed.lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&meta->lock FD: 1 BD: 72 +...: &devlink_port->type_lock FD: 132 BD: 72 ++++: bpf_devs_lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->remove_cache_srcu ->stock_lock ->&____s->seqcount ->&obj_hash[i].lock ->&nmap->mutex FD: 1 BD: 72 +.+.: (work_completion)(&(&devlink_port->type_warn_dw)->work) FD: 1 BD: 72 +.+.: &vn->sock_lock FD: 1 BD: 10 +.+.: subsys mutex#83 FD: 876 BD: 12 +.+.: &devlink->lock_key#2 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->&c->lock ->&____s->seqcount ->pool_lock#2 ->pcpu_alloc_mutex ->&n->list_lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&rq->__lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->quarantine_lock ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 144 BD: 72 +.+.: devnet_rename_sem ->(console_sem).lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&k->list_lock ->&root->kernfs_rwsem ->kernfs_rename_lock ->uevent_sock_mutex ->&obj_hash[i].lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&c->lock ->&n->list_lock ->&____s->seqcount ->remove_cache_srcu FD: 31 BD: 1 ..-.: 
&(&nsim_dev->trap_data->trap_report_dw)->timer FD: 38 BD: 14 +.+.: (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 303 ....: kernfs_rename_lock FD: 297 BD: 74 +.+.: &nft_net->commit_mutex ->fs_reclaim ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u32.lock ->(console_sem).lock ->&rq->__lock ->&n->list_lock ->(work_completion)(&ht->run_work) ->&ht->mutex ->nl_table_lock ->nl_table_wait.lock ->rlock-AF_NETLINK ->&p->alloc_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->(work_completion)(&(&priv->gc_work)->work) ->nf_ct_proto_mutex ->defrag4_mutex ->cpu_hotplug_lock ->krc.lock FD: 31 BD: 1 ..-.: &(&hwstats->traffic_dw)->timer FD: 32 BD: 14 +.+.: (work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 28 BD: 73 +.+.: &hwstats->hwsdev_list_lock ->&rq->__lock FD: 500 BD: 72 +.+.: &wg->device_update_lock ->&wg->static_identity.lock ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->&c->lock ->pcpu_alloc_mutex ->&handshake->lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&table->lock ->&peer->endpoint_lock ->&rq->__lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->cpu_hotplug_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&wg->socket_update_lock ->&list->lock#14 ->&pool->lock/1 ->remove_cache_srcu ->&n->list_lock ->&____s->seqcount#2 ->stock_lock ->&wq->mutex ->wq_pool_mutex ->&cfs_rq->removed.lock ->wq_mayday_lock ->&p->pi_lock ->&x->wait ->pcpu_lock ->&r->consumer_lock#2 ->rcu_state.barrier_mutex ->init_lock ->&zone->lock ->&peer->keypairs.keypair_update_lock ->(&peer->timer_retransmit_handshake) ->&base->lock ->(&peer->timer_send_keepalive) ->(&peer->timer_new_handshake) ->(&peer->timer_zero_key_material) ->(&peer->timer_persistent_keepalive) ->(work_completion)(&peer->clear_peer_work) ->(wq_completion)wg-crypt-wg1#3 ->napi_hash_lock ->(wq_completion)wg-kex-wg1#5 ->&table->lock#2 ->pcpu_alloc_mutex.wait_lock ->&table->hash[i].lock ->k-clock-AF_INET ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->k-clock-AF_INET6 ->(console_sem).lock FD: 133 BD: 129 ++++: &wg->static_identity.lock ->&rq->__lock ->&handshake->lock ->&sem->wait_lock ->&p->pi_lock ->tk_core.seq.seqcount ->&peer->keypairs.keypair_update_lock FD: 132 BD: 131 ++++: &handshake->lock ->&rq->__lock ->crngs.lock ->tk_core.seq.seqcount ->&table->lock#2 ->fs_reclaim ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->remove_cache_srcu ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&sem->wait_lock FD: 1 BD: 73 +.+.: &table->lock FD: 99 BD: 132 ++-.: &peer->endpoint_lock ->pool_lock#2 ->&obj_hash[i].lock FD: 883 BD: 12 +.+.: &devlink->lock_key#3 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&____s->seqcount ->pool_lock#2 ->rcu_node_0 ->&rq->__lock ->&xa->xa_lock#15 ->pcpu_alloc_mutex ->&n->list_lock ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&dentry->d_lock ->&fsnotify_mark_srcu 
->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->mount_lock ->rcu_state.barrier_mutex ->dev_base_lock ->lweventlist_lock ->netdev_unregistering_wq.lock ->krc.lock ->&dir->lock#2 ->(work_completion)(&(&devlink_port->type_warn_dw)->work) ->(work_completion)(&(&hwstats->traffic_dw)->work) ->&hwstats->hwsdev_list_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->(work_completion)(&data->fib_flush_work) ->(work_completion)(&data->fib_event_work) ->(work_completion)(&ht->run_work) ->&ht->mutex ->(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) ->pcpu_lock ->®ion->snapshot_lock ->stock_lock ->&____s->seqcount#2 ->&x->wait#10 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->remove_cache_srcu FD: 33 BD: 20 +.+.: &nsim_trap_data->trap_lock ->pool_lock#2 ->&c->lock ->&____s->seqcount ->crngs.lock ->&nsim_dev->fa_cookie_lock ->&obj_hash[i].lock ->&n->list_lock ->&zone->lock ->quarantine_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&____s->seqcount#2 ->&base->lock ->&pgdat->kswapd_wait FD: 1 BD: 21 +...: &nsim_dev->fa_cookie_lock FD: 1 BD: 72 +...: _xmit_SIT FD: 19 BD: 3717 +...: &bridge_netdev_addr_lock_key/1 ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 40 BD: 72 +.-.: (&brmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 39 BD: 3727 +.-.: &br->multicast_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&dir->lock#2 ->deferred_lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->init_task.mems_allowed_seq.seqcount FD: 40 BD: 72 +.-.: (&brmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 876 BD: 12 +.+.: &devlink->lock_key#4 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->&n->list_lock ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rcu_node_0 ->&rq->__lock ->&(&fn_net->fib_chain)->lock ->rtnl_mutex.wait_lock ->&p->pi_lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 72 +...: _xmit_TUNNEL FD: 1 BD: 6 +.+.: genl_mutex.wait_lock FD: 19 BD: 3716 +...: _xmit_IPGRE ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock FD: 66 BD: 1 +.-.: (&in_dev->mr_ifc_timer) ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&base->lock FD: 31 BD: 1 ..-.: &(&br->gc_work)->timer FD: 45 BD: 73 +.+.: (work_completion)(&(&br->gc_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 72 +...: _xmit_TUNNEL6 FD: 876 BD: 12 +.+.: &devlink->lock_key#5 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&xa->xa_lock#15 ->&n->list_lock ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&rq->__lock ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->pool_lock#2 ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq FD: 73 BD: 3719 +.-.: _xmit_TUNNEL6#2 ->&obj_hash[i].lock ->pool_lock#2 ->&____s->seqcount ->icmp_global.lock FD: 48 BD: 3716 +...: 
&dev_addr_list_lock_key/1 ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&batadv_netdev_addr_lock_key ->&obj_hash[i].lock ->krc.lock ->&bridge_netdev_addr_lock_key ->&n->list_lock ->&____s->seqcount#2 ->&zone->lock FD: 2 BD: 1 +.-.: (&tun->flow_gc_timer) ->&tun->lock FD: 1 BD: 73 +.-.: &tun->lock FD: 876 BD: 12 +.+.: &devlink->lock_key#6 ->crngs.lock ->fs_reclaim ->devlinks.xa_lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&xa->xa_lock#15 ->&c->lock ->&n->list_lock ->&____s->seqcount ->pcpu_alloc_mutex ->&base->lock ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&rq->__lock ->batched_entropy_u32.lock ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&(&fn_net->fib_chain)->lock ->&devlink_port->type_lock ->stack_depot_init_mutex ->&nsim_trap_data->trap_lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 45 BD: 3716 +...: &dev_addr_list_lock_key#2/1 ->_xmit_ETHER FD: 17 BD: 72 +.-.: (&app->periodic_timer) ->&app->lock FD: 40 BD: 1 +.-.: (&pmctx->ip6_own_query.timer) ->&br->multicast_lock FD: 40 BD: 1 +.-.: (&pmctx->ip4_own_query.timer) ->&br->multicast_lock FD: 31 BD: 1 ..-.: &(&conn->info_timer)->timer FD: 139 BD: 24 +.+.: (work_completion)(&(&conn->info_timer)->work) ->&conn->chan_lock FD: 7 BD: 3716 +...: _xmit_ETHER/1 ->&c->lock ->&____s->seqcount FD: 40 BD: 3718 +.-.: &hsr->seqnr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock ->&base->lock FD: 1 BD: 3719 +.-.: &new_node->seq_out_lock FD: 41 BD: 72 +.-.: (&hsr->announce_timer) FD: 1 BD: 72 +.+.: &nn->netlink_tap_lock FD: 19 BD: 3716 +...: &batadv_netdev_addr_lock_key/1 ->&c->lock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock FD: 45 BD: 3717 +...: &vlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->pool_lock#2 ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock ->&n->list_lock FD: 14 BD: 72 +.-.: (&hsr->prune_timer) ->&hsr->list_lock ->&obj_hash[i].lock ->&base->lock FD: 45 BD: 3717 +...: &macvlan_netdev_addr_lock_key/1 ->_xmit_ETHER ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&obj_hash[i].lock ->krc.lock FD: 2 BD: 73 +...: &ipvlan->addrs_lock ->pool_lock#2 FD: 1 BD: 73 +.-.: &list->lock#13 FD: 31 BD: 72 +.+.: (work_completion)(&port->bc_work) ->&list->lock#13 ->&obj_hash[i].lock ->pool_lock#2 ->&rq->__lock ->quarantine_lock ->&meta->lock ->kfence_freelist_lock FD: 45 BD: 3716 +...: &macsec_netdev_addr_lock_key/1 ->&c->lock ->&____s->seqcount ->_xmit_ETHER FD: 24 BD: 75 +...: dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 ->&sch->q.lock FD: 1 BD: 3718 +...: key#20 FD: 26 BD: 76 +...: &bat_priv->tt.commit_lock ->key#16 ->&bat_priv->softif_vlan_list_lock ->&bat_priv->tt.changes_list_lock ->&bat_priv->tt.last_changeset_lock ->pool_lock#2 ->&bat_priv->tvlv.container_list_lock ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 1 BD: 3718 +...: &entry->crc_lock FD: 28 BD: 73 +.+.: &wg->socket_update_lock ->&rq->__lock FD: 33 BD: 1 ..-.: &(&bat_priv->tt.work)->timer FD: 7 BD: 149 +.-.: &list->lock#14 ->&obj_hash[i].lock ->pool_lock#2 ->&meta->lock ->kfence_freelist_lock ->quarantine_lock FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg0 ->(work_completion)(&peer->transmit_handshake_work) FD: 129 BD: 91 +.+.: (work_completion)(&peer->transmit_handshake_work) ->tk_core.seq.seqcount ->&wg->static_identity.lock ->&cookie->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&peer->endpoint_lock ->batched_entropy_u8.lock ->&rq->__lock 
->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->kfence_freelist_lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 1 BD: 133 +...: &table->lock#2 FD: 28 BD: 129 ++++: &cookie->lock ->&rq->__lock FD: 39 BD: 2 +.+.: (work_completion)(&(&bat_priv->tt.work)->work) ->key#16 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->key#21 ->&bat_priv->tt.req_list_lock ->&bat_priv->tt.roam_list_lock ->&base->lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3 +...: key#21 FD: 1 BD: 3 +...: &bat_priv->tt.req_list_lock FD: 1 BD: 3 +...: &bat_priv->tt.roam_list_lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 1 BD: 148 +.-.: &r->producer_lock#2 FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg0#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 192 BD: 109 +.+.: (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->&r->consumer_lock#2 ->&wg->static_identity.lock ->&peer->endpoint_lock ->tk_core.seq.seqcount ->&cookie->lock ->&handshake->lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->&rq->__lock ->&list->lock#14 ->&c->lock ->rcu_node_0 ->&rcu_state.expedited_wq ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->batched_entropy_u8.lock ->&sem->wait_lock ->&p->pi_lock FD: 1 BD: 110 +.+.: &r->consumer_lock#2 FD: 5 BD: 132 +.-.: &peer->keypairs.keypair_update_lock ->&table->lock#2 ->&obj_hash[i].lock ->pool_lock#2 FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg1#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg1 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 29 BD: 91 +.+.: (work_completion)(&peer->transmit_packet_work) ->&obj_hash[i].lock ->&peer->endpoint_lock ->&base->lock ->batched_entropy_u8.lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg0 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 1 +.-.: &keypair->receiving_counter.lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2 
->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 129 BD: 11 +.+.: &data->mtx ->fs_reclaim ->pool_lock#2 ->&rfkill->lock ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg2#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg0#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 108 BD: 1 +.-.: (&ndev->rs_timer) ->&ndev->lock ->pool_lock#2 ->&dir->lock#2 ->&ul->lock#2 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->init_task.mems_allowed_seq.seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->key#29 FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg1#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg0#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg1#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#2 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg2#3 ->(work_completion)(&peer->transmit_handshake_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg2#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#2 ->(work_completion)(&({ 
do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 1 BD: 79 ....: &wdev->event_lock FD: 1 BD: 75 +.+.: (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) FD: 34 BD: 80 +.+.: &local->key_mtx ->&obj_hash[i].lock FD: 31 BD: 81 ..-.: &rdev->wiphy_work_lock FD: 1 BD: 75 ....: (&dwork->timer) FD: 1 BD: 75 +.+.: (work_completion)(&(&link->color_collision_detect_work)->work) FD: 325 BD: 2 +.+.: (work_completion)(&rdev->wiphy_work) ->&rdev->wiphy.mtx ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 21 BD: 1 +.+.: (wq_completion)phy3 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 80 ..-.: &list->lock#15 FD: 1 BD: 79 +.-.: &ifibss->incomplete_lock FD: 10 BD: 77 +...: &bat_priv->tt.last_changeset_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&obj_hash[i].lock FD: 143 BD: 84 +.+.: &local->mtx ->&local->chanctx_mtx ->&____s->seqcount ->pool_lock#2 ->fs_reclaim ->&c->lock ->&local->ack_status_lock ->&local->queue_stop_reason_lock ->rcu_node_0 ->&rq->__lock ->&n->list_lock ->&____s->seqcount#2 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&obj_hash[i].lock ->&base->lock ->nl_table_lock ->nl_table_wait.lock ->&lock->wait_lock ->quarantine_lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->&data->mutex ->&meta->lock FD: 873 BD: 1 +.+.: (wq_completion)cfg80211 ->(work_completion)(&rdev->event_work) ->(work_completion)(&(&rdev->dfs_update_channels_wk)->work) FD: 325 BD: 2 +.+.: (work_completion)(&rdev->event_work) ->&rdev->wiphy.mtx ->&lock->wait_lock ->&p->pi_lock FD: 149 BD: 2 +.+.: wireless_nlevent_work ->net_rwsem FD: 181 BD: 1 +.+.: (wq_completion)phy4 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&link->csa_finalize_work) FD: 1 BD: 3791 +.-.: &local->active_txq_lock[i] FD: 38 BD: 3785 +.-.: &local->handle_wake_tx_queue_lock ->&local->active_txq_lock[i] ->&local->queue_stop_reason_lock ->&fq->lock ->tk_core.seq.seqcount ->hwsim_radio_lock ->&list->lock#16 FD: 1 BD: 3799 ..-.: &local->queue_stop_reason_lock FD: 1 BD: 3801 ..-.: &list->lock#16 FD: 147 BD: 1 +.+.: (wq_completion)phy5 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) FD: 39 BD: 1 +.-.: &local->rx_path_lock ->&list->lock#15 ->&rdev->wiphy_work_lock ->&obj_hash[i].lock ->pool_lock#2 ->&rdev->mgmt_registrations_lock ->&c->lock ->&n->list_lock ->&local->queue_stop_reason_lock ->tk_core.seq.seqcount ->hwsim_radio_lock ->&list->lock#16 FD: 19 BD: 88 +...: &sta->lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 19 BD: 79 +.-.: &sta->rate_ctrl_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&n->list_lock FD: 158 BD: 79 +.+.: &local->sta_mtx ->fs_reclaim ->pool_lock#2 ->&local->chanctx_mtx ->pin_fs_lock ->&sb->s_type->i_mutex_key#3 ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 ->&n->list_lock ->&sta->ampdu_mlme.mtx ->(work_completion)(&sta->ampdu_mlme.work) ->&rq->__lock ->&sta->lock ->krc.lock ->&local->key_mtx ->&fq->lock ->&dentry->d_lock ->&fsnotify_mark_srcu ->&sb->s_type->i_lock_key#7 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->mount_lock ->&local->active_txq_lock[i] ->(work_completion)(&sta->drv_deliver_wk) ->batched_entropy_u8.lock 
->kfence_freelist_lock ->&meta->lock ->quarantine_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->remove_cache_srcu FD: 145 BD: 1 +.+.: &type->s_umount_key#46/1 ->fs_reclaim ->pcpu_alloc_mutex ->shrinker_rwsem ->list_lrus_mutex ->sb_lock ->&c->lock ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#32 ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->binderfs_minors_mutex ->&dentry->d_lock ->&sb->s_type->i_mutex_key#20 ->&____s->seqcount#2 ->&____s->seqcount FD: 42 BD: 3 +.+.: &sb->s_type->i_lock_key#32 ->&dentry->d_lock FD: 2 BD: 2 +.+.: binderfs_minors_mutex ->binderfs_minors.xa_lock FD: 1 BD: 3 ....: binderfs_minors.xa_lock FD: 131 BD: 2 +.+.: &sb->s_type->i_mutex_key#20 ->&sb->s_type->i_lock_key#32 ->rename_lock.seqcount ->fs_reclaim ->pool_lock#2 ->&dentry->d_lock ->mmu_notifier_invalidate_range_start ->&s->s_inode_list_lock ->tk_core.seq.seqcount ->&c->lock ->&____s->seqcount ->&____s->seqcount#2 FD: 1 BD: 3 +.+.: iunique_lock FD: 847 BD: 3 +.+.: &type->i_mutex_dir_key#6/1 ->rename_lock.seqcount ->fs_reclaim ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex FD: 1 BD: 16 ....: task_group_lock FD: 131 BD: 1 .+.+: kn->active#53 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 FD: 131 BD: 1 ++++: kn->active#54 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 3 BD: 146 ..-.: cgroup_threadgroup_rwsem.rss.gp_wait.lock ->&obj_hash[i].lock FD: 1 BD: 144 ....: cgroup_threadgroup_rwsem.waiters.lock FD: 1 BD: 17 +.+.: (wq_completion)cpuset_migrate_mm FD: 847 BD: 3 +.+.: &type->i_mutex_dir_key#7/1 ->rename_lock.seqcount ->fs_reclaim ->&c->lock ->&dentry->d_lock ->&root->kernfs_rwsem ->tomoyo_ss ->&root->kernfs_iattr_rwsem ->cgroup_mutex ->pool_lock#2 ->&xa->xa_lock#4 ->&obj_hash[i].lock ->stock_lock ->cgroup_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&sem->wait_lock ->&sb->s_type->i_lock_key#31 ->&n->list_lock FD: 131 BD: 1 ++++: kn->active#55 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] ->stock_lock ->&c->lock ->&____s->seqcount ->pool_lock#2 ->&____s->seqcount#2 ->&n->list_lock FD: 1 BD: 147 ....: cpuset_attach_wq.lock FD: 2 BD: 4450 -.-.: stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 131 BD: 1 .+.+: kn->active#56 ->fs_reclaim ->stock_lock ->pool_lock#2 ->&kernfs_locks->open_file_mutex[count] FD: 132 BD: 1 .+.+: kn->active#57 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->memcg_max_mutex ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&c->lock ->&____s->seqcount#2 ->&n->list_lock ->&____s->seqcount FD: 1 BD: 8 +.+.: memcg_max_mutex FD: 1 BD: 6 ....: &per_cpu(xt_recseq, i) FD: 182 BD: 1 +.+.: (wq_completion)phy6 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) ->(work_completion)(&link->csa_finalize_work) FD: 285 BD: 1 +.+.: nf_nat_proto_mutex ->fs_reclaim ->pool_lock#2 ->nf_hook_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->stock_lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&rq->__lock FD: 1 BD: 105 ..-.: elock-AF_INET6 FD: 30 BD: 1 +.+.: loop_validate_mutex ->&lo->lo_mutex ->loop_validate_mutex.wait_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg0#5 ->(work_completion)(&peer->transmit_handshake_work) FD: 1 BD: 1 ....: _rs.lock FD: 131 BD: 73 +.+.: (wq_completion)wg-kex-wg1#5 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 
37 BD: 73 +.-.: (&peer->timer_persistent_keepalive) ->pool_lock#2 ->&list->lock#14 ->tk_core.seq.seqcount ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->init_task.mems_allowed_seq.seqcount FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg0#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg1#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 73 +.+.: (wq_completion)wg-crypt-wg1#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#5 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg2#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#3 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 187 BD: 75 +.+.: sk_lock-AF_INET6/1 ->slock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#17 ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq ->krc.lock ->sctp_assocs_id_lock ->fs_reclaim ->&____s->seqcount ->tk_core.seq.seqcount ->&c->lock ->&list->lock#18 ->&zone->lock ->remove_cache_srcu ->&cfs_rq->removed.lock ->quarantine_lock FD: 1 BD: 77 +.-.: rlock-AF_INET6 FD: 1 BD: 78 ....: &list->lock#17 FD: 84 BD: 80 +.-.: slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->clock-AF_INET6 ->krc.lock ->&sctp_port_hashtable[i].lock ->&____s->seqcount ->&c->lock ->&n->list_lock ->tk_core.seq.seqcount ->&base->lock ->&hashinfo->ehash_locks[i] ->hrtimer_bases.lock ->batched_entropy_u32.lock 
->&tcp_hashinfo.bhash[i].lock ->&f->f_owner.lock ->elock-AF_INET6 ->key#26 FD: 1 BD: 86 ++.-: &sctp_ep_hashtable[i].lock FD: 1 BD: 4 +.+.: &q->instances_lock FD: 15 BD: 5 +...: &log->instances_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&____s->seqcount ->&dir->lock ->&inst->lock FD: 4 BD: 7 +...: vsock_table_lock ->batched_entropy_u32.lock FD: 228 BD: 5 +.+.: sk_lock-AF_VSOCK ->slock-AF_VSOCK ->vsock_table_lock ->clock-AF_VSOCK ->rlock-AF_VSOCK ->fs_reclaim ->pool_lock#2 ->&vvs->rx_lock ->&list->lock#44 ->&pool->lock ->&ei->socket.wq.wait ->&rq->__lock ->&dir->lock ->&obj_hash[i].lock ->sk_lock-AF_VSOCK/1 ->&vvs->tx_lock ->&base->lock ->&mm->mmap_lock FD: 30 BD: 7 +...: slock-AF_VSOCK ->&sk->sk_lock.wq FD: 228 BD: 1 +.+.: sk_lock-AF_ALG ->slock-AF_ALG ->&mm->mmap_lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&dir->lock ->&rq->__lock ->&ei->socket.wq.wait ->(console_sem).lock ->&c->lock ->&____s->seqcount ->rcu_node_0 ->&n->list_lock ->&____s->seqcount#2 ->sk_lock-AF_ALG/1 ->quarantine_lock ->remove_cache_srcu ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock ->&sem->wait_lock ->&p->pi_lock ->&drbg->drbg_mutex FD: 30 BD: 3 +...: slock-AF_ALG ->&sk->sk_lock.wq FD: 1 BD: 3770 +.-.: &nf_nat_locks[i] FD: 21 BD: 1 +.+.: (wq_completion)phy7 ->(work_completion)(&local->reconfig_filter) FD: 1 BD: 7 +...: clock-AF_VSOCK FD: 1 BD: 7 ....: rlock-AF_VSOCK FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg0#7 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg0#8 ->(work_completion)(&peer->transmit_handshake_work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1#7 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg0#9 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg1#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 21 BD: 1 +.+.: (wq_completion)phy8 ->(work_completion)(&local->reconfig_filter) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg1#9 
->(work_completion)(&peer->transmit_handshake_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg0#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg1#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) ->&rq->__lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#7 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg2#8 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#4 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 130 BD: 1 +.+.: (wq_completion)wg-kex-wg2#9 ->(work_completion)(&peer->transmit_handshake_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg2#10 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#5 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 21 BD: 1 +.+.: (wq_completion)phy9 ->(work_completion)(&local->reconfig_filter) FD: 131 BD: 1 +.+.: 
(wq_completion)wg-kex-wg0#11 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg1#11 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg0#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg1#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg1#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg0#6 ->&rq->__lock ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 173 BD: 1 +.+.: &net->xfrm.xfrm_cfg_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->(kmod_concurrent_max).lock ->&rq->__lock ->&x->wait#17 ->running_helpers_waitq.lock ->xfrm_state_gc_lock ->pfkey_mutex ->rlock-AF_KEY ->&(&net->xfrm.policy_hthresh.lock)->lock ->ipcomp_resource_mutex ->&net->xfrm.xfrm_state_lock ->&n->list_lock ->&pfk->dump_lock ->&net->xfrm.xfrm_policy_lock ->crypto_alg_sem ->(crypto_chain).rwsem ->&x->wait#21 ->(console_sem).lock ->&policy->lock ->&list->lock#31 ->&base->lock ->&x->lock FD: 131 BD: 1 +.+.: (wq_completion)wg-kex-wg2#11 ->(work_completion)(&peer->transmit_handshake_work) ->(work_completion)(&peer->clear_peer_work) FD: 193 BD: 1 +.+.: (wq_completion)wg-kex-wg2#12 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) FD: 194 BD: 1 +.+.: (wq_completion)wg-crypt-wg2#6 ->(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) ->(work_completion)(&peer->transmit_packet_work) FD: 147 BD: 1 +.+.: (wq_completion)phy11 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) FD: 21 BD: 1 +.+.: (wq_completion)phy10 
->(work_completion)(&local->reconfig_filter) FD: 147 BD: 1 +.+.: (wq_completion)phy12 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) FD: 1 BD: 16 +.+.: cgroup_mutex.wait_lock FD: 21 BD: 1 +.+.: (wq_completion)phy13 ->(work_completion)(&local->reconfig_filter) FD: 147 BD: 1 +.+.: (wq_completion)phy14 ->(work_completion)(&local->reconfig_filter) ->(work_completion)(&(&local->roc_work)->work) FD: 29 BD: 133 ....: &sk->sk_lock.wq ->&p->pi_lock FD: 1 BD: 3736 ++.-: &table->lock#3 FD: 11 BD: 89 +...: &sctp_port_hashtable[i].lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 29 BD: 106 ..-.: &asoc->wait ->&p->pi_lock FD: 1 BD: 91 ..-.: key#22 FD: 12 BD: 260 +.-.: sctp_assocs_id_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 1 BD: 107 ..-.: &list->lock#18 FD: 236 BD: 3 +.+.: sk_lock-AF_TIPC ->&rq->__lock ->slock-AF_TIPC ->fs_reclaim ->pool_lock#2 ->&c->lock ->&mm->mmap_lock ->&list->lock#19 ->&obj_hash[i].lock ->&n->list_lock ->clock-AF_TIPC ->&list->lock#23 ->&ei->socket.wq.wait ->&base->lock ->&____s->seqcount ->&____s->seqcount#2 ->remove_cache_srcu ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->quarantine_lock ->&zone->lock ->&srv->idr_lock ->&tn->nametbl_lock ->&con->sub_lock ->&con->outqueue_lock ->&tipc_net(net)->bclock ->tk_core.seq.seqcount ->&list->lock#5 ->stock_lock ->&dir->lock ->batched_entropy_u32.lock ->sk_lock-AF_TIPC/1 FD: 38 BD: 5 +...: slock-AF_TIPC ->&list->lock#23 ->&list->lock#42 ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&list->lock#19 ->&____s->seqcount#2 ->&____s->seqcount ->&base->lock ->&n->list_lock ->&sk->sk_lock.wq FD: 1 BD: 82 +...: &list->lock#19 FD: 1 BD: 4 +...: clock-AF_TIPC FD: 179 BD: 3 +.+.: sk_lock-AF_INET/1 ->slock-AF_INET ->rlock-AF_INET ->&obj_hash[i].lock ->pool_lock#2 ->&list->lock#17 ->&____s->seqcount ->&list->lock#18 ->fs_reclaim ->&c->lock ->tk_core.seq.seqcount ->krc.lock ->&base->lock ->sctp_assocs_id_lock ->&n->list_lock ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 4 +.-.: rlock-AF_INET FD: 1 BD: 24 +.+.: sco_sk_list.lock FD: 221 BD: 22 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_SCO ->slock-AF_BLUETOOTH-BTPROTO_SCO ->&conn->lock#2 ->&obj_hash[i].lock ->&base->lock ->&ei->socket.wq.wait ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock ->pool_lock#2 ->sco_sk_list.lock ->&mm->mmap_lock FD: 1 BD: 23 +...: slock-AF_BLUETOOTH-BTPROTO_SCO FD: 1 BD: 23 +.+.: &conn->lock#2 FD: 1 BD: 79 +...: &msk->pm.lock FD: 525 BD: 4 +.+.: (work_completion)(&msk->work) ->&rq->__lock ->sk_lock-AF_INET6 ->slock-AF_INET6 ->sk_lock-AF_INET ->slock-AF_INET FD: 15 BD: 1 +...: &nr_netdev_xmit_lock_key ->&obj_hash[i].lock ->nr_node_list_lock ->pool_lock#2 ->quarantine_lock FD: 1 BD: 2 +...: nr_node_list_lock FD: 1 BD: 1 ....: net_ratelimit_state.lock FD: 1 BD: 4 +...: clock-AF_NETROM FD: 227 BD: 3 +.+.: sk_lock-AF_NETROM ->&rq->__lock ->slock-AF_NETROM ->&obj_hash[i].lock ->wlock-AF_NETROM ->&list->lock#20 ->nr_list_lock ->rlock-AF_NETROM ->ax25_uid_lock ->pool_lock#2 ->&list->lock#38 ->&base->lock ->&ei->socket.wq.wait ->fs_reclaim ->&c->lock ->&mm->mmap_lock ->&n->list_lock ->clock-AF_NETROM FD: 39 BD: 6 +.-.: slock-AF_NETROM ->&sk->sk_lock.wq ->&obj_hash[i].lock ->&base->lock ->wlock-AF_NETROM ->&list->lock#20 ->&c->lock ->pool_lock#2 ->&list->lock#38 ->rlock-AF_NETROM FD: 1 BD: 7 ..-.: wlock-AF_NETROM FD: 1 BD: 7 ..-.: 
&list->lock#20 FD: 1 BD: 6 +.-.: nr_list_lock FD: 1 BD: 7 ..-.: rlock-AF_NETROM FD: 1019 BD: 1 +.+.: (wq_completion)netns ->net_cleanup_work FD: 1018 BD: 2 +.+.: net_cleanup_work ->pernet_ops_rwsem ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->krc.lock ->&dir->lock ->stock_lock ->&rq->__lock ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 5 +...: &net->nsid_lock FD: 1 BD: 5 +...: &tn->node_list_lock FD: 1 BD: 5 +.+.: netns_bpf_mutex FD: 1 BD: 5 ....: (&net->fs_probe_timer) FD: 1 BD: 7 ++++: &net->cells_lock FD: 1 BD: 5 ....: (&net->cells_timer) FD: 34 BD: 1 +.+.: (wq_completion)afs ->(work_completion)(&net->cells_manager) ->(work_completion)(&net->fs_manager) FD: 31 BD: 2 +.+.: (work_completion)(&net->cells_manager) ->&net->cells_lock ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 5 ....: (&net->fs_timer) FD: 31 BD: 2 +.+.: (work_completion)(&net->fs_manager) ->&(&net->fs_lock)->lock ->bit_wait_table + i ->&rq->__lock FD: 1 BD: 3 +.+.: &(&net->fs_lock)->lock FD: 1 BD: 8 +.+.: &rx->incoming_lock FD: 1 BD: 6 +.+.: &call->notify_lock FD: 1 BD: 9 ....: (rxrpc_call_limiter).lock FD: 1 BD: 9 +.+.: &rx->recvmsg_lock FD: 1 BD: 8 ....: (&call->timer) FD: 1 BD: 10 ....: &list->lock#21 FD: 1 BD: 5 +.+.: (wq_completion)kafsd FD: 1 BD: 5 +...: k-clock-AF_RXRPC FD: 1 BD: 7 ..-.: rlock-AF_RXRPC FD: 1 BD: 1 ....: (&local->client_conn_reap_timer) FD: 1 BD: 1 ....: &list->lock#22 FD: 1 BD: 10 +.+.: (work_completion)(&data->gc_work) FD: 1 BD: 5 +.+.: (work_completion)(&ovs_net->dp_notify_work) FD: 9 BD: 89 +...: &srv->idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 219 BD: 72 +.+.: &net->xdp.lock ->&xs->mutex ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 5 ....: (&rxnet->service_conn_reap_timer) FD: 1 BD: 83 +...: &nt->cluster_scope_lock FD: 1 BD: 5 +.+.: (work_completion)(&tn->work) FD: 1 BD: 5 +.+.: (work_completion)(&(&c->work)->work) FD: 2 BD: 8 +.+.: (work_completion)(&rxnet->service_conn_reaper) ->&rxnet->conn_lock FD: 420 BD: 5 +.+.: (wq_completion)krdsd ->(work_completion)(&rtn->rds_tcp_accept_w) ->(work_completion)(&(&cp->cp_send_w)->work) ->(work_completion)(&(&cp->cp_recv_w)->work) ->(work_completion)(&cp->cp_down_w) FD: 414 BD: 6 +.+.: (work_completion)(&rtn->rds_tcp_accept_w) ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->&obj_hash[i].lock ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&c->lock ->&n->list_lock ->&dir->lock ->once_lock ->&____s->seqcount#2 ->&____s->seqcount ->rds_cong_lock ->rds_trans_sem ->&tc->t_conn_path_lock ->remove_cache_srcu FD: 1 BD: 7 ....: rds_tcp_conn_lock FD: 1 BD: 5 ....: loop_conns_lock FD: 14 BD: 5 +.+.: (wq_completion)l2tp ->(work_completion)(&tunnel->del_work) FD: 1 BD: 72 +.+.: mirred_list_lock FD: 10 BD: 76 +...: &idev->mc_query_lock ->&obj_hash[i].lock FD: 211 BD: 77 +.+.: (work_completion)(&(&idev->mc_report_work)->work) ->&idev->mc_report_lock ->&idev->mc_lock ->&rq->__lock FD: 31 BD: 78 +...: &idev->mc_report_lock ->&obj_hash[i].lock FD: 28 BD: 72 +.+.: &pnn->pndevs.lock ->&rq->__lock ->&c->lock ->pool_lock#2 FD: 28 BD: 72 +.+.: &pnn->routes.lock ->&rq->__lock FD: 1 BD: 21 ....: netdev_unregistering_wq.lock FD: 34 BD: 22 +.+.: (work_completion)(&(&conn->disc_work)->work) ->pool_lock#2 ->&list->lock#7 ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 103 BD: 73 +.-.: (&peer->timer_retransmit_handshake) ->&peer->endpoint_lock ->&obj_hash[i].lock ->&list->lock#14 FD: 33 BD: 1 ..-.: &(&bat_priv->dat.work)->timer FD: 33 BD: 1 ..-.: 
&(&bat_priv->bla.work)->timer FD: 32 BD: 2 +.+.: (work_completion)(&(&bat_priv->dat.work)->work) ->&hash->list_locks[i] ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3 +...: &hash->list_locks[i] FD: 37 BD: 2 +.+.: (work_completion)(&(&bat_priv->bla.work)->work) ->&rq->__lock ->key#20 ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->crngs.lock ->&cfs_rq->removed.lock FD: 1 BD: 4 +.+.: hidp_sk_list.lock FD: 28 BD: 10 +.+.: &fn->fou_lock ->&rq->__lock FD: 289 BD: 5 +.+.: ipvs->sync_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&c->lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&rq->__lock ->&dir->lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->kthread_create_lock ->&p->pi_lock ->&x->wait ->rtnl_mutex.wait_lock FD: 879 BD: 12 ++++: rdma_nets_rwsem ->rdma_nets.xa_lock ->&rq->__lock ->&device->compat_devs_mutex ->&lock->wait_lock ->&p->pi_lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 ->rdma_nets_rwsem.wait_lock FD: 1 BD: 5 +...: k-clock-AF_NETLINK FD: 1 BD: 5 +.+.: &hn->hn_lock FD: 45 BD: 72 +.+.: &caifn->caifdevs.lock ->&obj_hash[i].lock ->&rq->__lock ->pool_lock#2 ->&this->info_list_lock ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 136 BD: 1 +.+.: (wq_completion)inet_frag_wq ->(work_completion)(&fqdir->destroy_work) FD: 135 BD: 2 +.+.: (work_completion)(&fqdir->destroy_work) ->(work_completion)(&ht->run_work) ->&ht->mutex FD: 45 BD: 2 +.+.: fqdir_free_work ->rcu_state.barrier_mutex ->&obj_hash[i].lock ->pool_lock#2 ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->quarantine_lock ->&base->lock FD: 1 BD: 74 +...: &this->info_list_lock FD: 1 BD: 5 +.+.: &pnetids_ndev->lock FD: 212 BD: 75 +.+.: k-sk_lock-AF_INET6/1 ->k-slock-AF_INET6 ->rlock-AF_INET6 ->&list->lock#17 ->pool_lock#2 ->&dir->lock ->fs_reclaim ->&obj_hash[i].lock ->slock-AF_INET6 ->k-clock-AF_INET6 ->&c->lock ->&rq->__lock ->&n->list_lock FD: 97 BD: 80 +.-.: k-slock-AF_INET6/1 ->&sctp_ep_hashtable[i].lock ->&obj_hash[i].lock ->pool_lock#2 ->k-clock-AF_INET6 ->tk_core.seq.seqcount ->&____s->seqcount#2 ->&____s->seqcount ->&c->lock ->&base->lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] ->elock-AF_INET6 ->slock-AF_INET6 ->&n->list_lock ->clock-AF_INET6 FD: 1 BD: 86 +...: &list->lock#23 FD: 152 BD: 1 +.+.: (wq_completion)tipc_rcv ->(work_completion)(&srv->awork) ->(work_completion)(&con->rwork) FD: 150 BD: 3 +.+.: (work_completion)(&srv->awork) ->&srv->idr_lock ->fs_reclaim ->pool_lock#2 ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->&obj_hash[i].lock ->k-clock-AF_TIPC ->&rq->__lock ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&c->lock ->&n->list_lock FD: 142 BD: 77 +.+.: k-sk_lock-AF_TIPC/1 ->k-slock-AF_TIPC ->&obj_hash[i].lock ->&base->lock ->fs_reclaim ->pool_lock#2 ->&list->lock#23 ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount FD: 144 BD: 3 +.+.: (work_completion)(&con->rwork) ->k-sk_lock-AF_TIPC ->k-slock-AF_TIPC ->k-clock-AF_TIPC ->&srv->idr_lock ->&obj_hash[i].lock ->pool_lock#2 ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&con->outqueue_lock ->&rq->__lock FD: 21 BD: 1 ....: &trie->lock ->stock_lock ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 137 BD: 26 +.+.: &chan->lock/1 
->&obj_hash[i].lock ->&base->lock ->chan_list_lock ->&conn->ident_lock ->fs_reclaim ->pool_lock#2 ->&list->lock#9 ->&rq->__lock ->sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->clock-AF_BLUETOOTH ->rlock-AF_BLUETOOTH ->wlock-AF_BLUETOOTH ->&dir->lock ->&cfs_rq->removed.lock ->quarantine_lock FD: 40 BD: 81 +...: &con->sub_lock ->&tn->nametbl_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 4 BD: 89 +...: &con->outqueue_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 13 BD: 248 +...: link_idr_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&____s->seqcount#2 ->&____s->seqcount FD: 292 BD: 4 +.+.: tracepoints_mutex ->fs_reclaim ->pool_lock#2 ->cpu_hotplug_lock ->tracepoint_srcu_srcu_usage.lock ->&ACCESS_PRIVATE(sdp, lock) ->&obj_hash[i].lock ->tasklist_lock ->&c->lock ->&rq->__lock ->rcu_node_0 ->tracepoint_srcu ->&x->wait#3 ->tracepoints_mutex.wait_lock ->&n->list_lock ->remove_cache_srcu ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->pool_lock ->reg_lock ->&____s->seqcount#2 ->&rnp->exp_lock ->rcu_state.exp_mutex FD: 2 BD: 6 +.+.: &match->lock ->ptype_lock FD: 32 BD: 9 ....: tracepoint_srcu_srcu_usage.lock ->&obj_hash[i].lock ->&ACCESS_PRIVATE(sdp, lock) ->&base->lock FD: 31 BD: 1 +.-.: (&sdp->delay_work) FD: 1 BD: 3 ....: &ep->poll_wait FD: 32 BD: 4076 ....: &ep->poll_wait/1 ->&ep->lock FD: 171 BD: 151 .+.+: sb_pagefaults ->tk_core.seq.seqcount ->mmu_notifier_invalidate_range_start ->&c->lock ->pool_lock#2 ->&journal->j_state_lock ->jbd2_handle ->&obj_hash[i].lock ->mapping.invalidate_lock ->remove_cache_srcu ->&base->lock ->&rq->__lock ->&sem->wait_lock ->&p->pi_lock ->rcu_node_0 ->&____s->seqcount#2 ->&____s->seqcount ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&n->list_lock ->&journal->j_wait_transaction_locked ->&sb->s_type->i_lock_key#22 ->&wb->list_lock ->&cfs_rq->removed.lock FD: 29 BD: 27 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP ->&rq->__lock FD: 227 BD: 72 +.+.: sk_lock-AF_CAN ->slock-AF_CAN ->clock-AF_CAN ->&obj_hash[i].lock ->&rq->__lock ->fs_reclaim ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&net->can.rcvlists_lock ->pcpu_lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&ent->pde_unload_lock ->&mm->mmap_lock ->j1939_netdev_lock ->&priv->lock ->&priv->j1939_socks_lock ->&jsk->sk_session_queue_lock FD: 1 BD: 73 +...: slock-AF_CAN FD: 1 BD: 73 ++..: clock-AF_CAN FD: 1 BD: 3 ..-.: rlock-AF_CAN FD: 1 BD: 3 ..-.: elock-AF_CAN FD: 168 BD: 9 +.+.: &sb->s_type->i_mutex_key#8/4 ->&ei->i_data_sem ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&journal->j_state_lock ->&rq->__lock ->jbd2_handle ->&obj_hash[i].lock ->&journal->j_wait_commit ->&journal->j_wait_done_commit ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&sem->wait_lock ->&ei->i_data_sem/1 ->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&cfs_rq->removed.lock ->&p->pi_lock ->&n->list_lock ->key#3 ->key#14 ->remove_cache_srcu FD: 122 BD: 166 +.+.: &ei->i_data_sem/1 ->mmu_notifier_invalidate_range_start ->pool_lock#2 ->&ei->i_es_lock ->&ei->i_raw_lock ->&obj_hash[i].lock ->&____s->seqcount ->&c->lock ->&rq->__lock ->&n->list_lock ->&ei->i_prealloc_lock ->&mapping->private_lock ->&ret->b_state_lock ->bit_wait_table + i ->rcu_node_0 ->remove_cache_srcu ->&sem->wait_lock ->&bgl->locks[i].lock ->&cfs_rq->removed.lock ->&sb->s_type->i_lock_key#22 ->&journal->j_state_lock ->&journal->j_revoke_lock ->&____s->seqcount#2 ->stock_lock ->&xa->xa_lock#8 ->lock#4 ->key#3 ->key#14 
->batched_entropy_u8.lock ->kfence_freelist_lock ->&meta->lock ->&wb->list_lock ->quarantine_lock FD: 285 BD: 1 +.+.: bpf_stats_enabled_mutex ->&newf->file_lock ->fs_reclaim ->pool_lock#2 ->&xa->xa_lock#4 ->&obj_hash[i].lock ->stock_lock ->&sb->s_type->i_lock_key#15 ->cpu_hotplug_lock ->&c->lock FD: 28 BD: 5 +.+.: &sn->gssp_lock ->&rq->__lock FD: 1 BD: 8 +.+.: &cd->hash_lock FD: 50 BD: 6 +.+.: xfrm_state_gc_work ->xfrm_state_gc_lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->&obj_hash[i].lock ->(&x->rtimer) ->&base->lock ->pool_lock#2 FD: 18 BD: 80 +.-.: &net->xfrm.xfrm_state_lock ->hrtimer_bases.lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 FD: 13 BD: 76 +.-.: ip6_fl_lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 FD: 1 BD: 5 ....: (&net->ipv6.ip6_fib_timer) FD: 1 BD: 72 ....: (&mrt->ipmr_expire_timer) FD: 288 BD: 78 +.+.: __ip_vs_mutex ->&ipvs->dest_trash_lock ->&obj_hash[i].lock ->pool_lock#2 ->ip_vs_sched_mutex ->nf_hook_mutex ->cpu_hotplug_lock ->&rq->__lock ->fs_reclaim ->pcpu_alloc_mutex ->&c->lock ->ipvs->est_mutex ->(console_sem).lock FD: 1 BD: 5 ....: (&ipvs->dest_trash_timer) FD: 28 BD: 5 +.+.: (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) ->&rq->__lock FD: 138 BD: 6 +.+.: (work_completion)(&(&ipvs->est_reload_work)->work) ->ipvs->est_mutex FD: 1 BD: 5 +...: recent_lock FD: 134 BD: 5 +.+.: hashlimit_mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&rq->__lock ->proc_subdir_lock ->proc_inum_ida.xa_lock ->&obj_hash[i].lock ->&base->lock ->&ent->pde_unload_lock ->&c->lock FD: 1 BD: 5 +.+.: trans_gc_work FD: 1 BD: 5 +.+.: (work_completion)(&(&cnet->ecache.dwork)->work) FD: 28 BD: 5 +.+.: (work_completion)(&net->xfrm.policy_hash_work) ->&rq->__lock FD: 60 BD: 79 +...: &net->xfrm.xfrm_policy_lock ->&____s->seqcount#12 ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&base->lock ->&____s->seqcount#16 ->krc.lock FD: 1 BD: 5 +.+.: (work_completion)(&net->xfrm.state_hash_work) FD: 1 BD: 147 +.+.: freezer_mutex.wait_lock FD: 1 BD: 74 ..-.: &list->lock#25 FD: 1 BD: 1 ..-.: &list->lock#26 FD: 1 BD: 4 +.-.: x25_list_lock FD: 1 BD: 1 +.-.: x25_forward_list_lock FD: 35 BD: 73 +.-.: (&peer->timer_send_keepalive) ->pool_lock#2 ->&list->lock#14 ->tk_core.seq.seqcount ->&c->lock ->init_task.mems_allowed_seq.seqcount ->&____s->seqcount ->&n->list_lock FD: 1 BD: 1 ....: _rs.lock#2 FD: 1 BD: 74 ..-.: &list->lock#24 FD: 1 BD: 3 +.+.: bcm_notifier_lock FD: 1 BD: 3 +...: clock-AF_ROSE FD: 33 BD: 3 +.+.: sk_lock-AF_ROSE ->&rq->__lock ->slock-AF_ROSE ->rose_list_lock ->&obj_hash[i].lock ->wlock-AF_ROSE ->&list->lock#27 ->rlock-AF_ROSE FD: 1 BD: 4 +...: slock-AF_ROSE FD: 1 BD: 4 ....: wlock-AF_ROSE FD: 1 BD: 4 ....: &list->lock#27 FD: 1 BD: 4 +...: rose_list_lock FD: 1 BD: 4 ....: rlock-AF_ROSE FD: 1 BD: 2 +.+.: loop_validate_mutex.wait_lock FD: 1 BD: 158 +.+.: text_mutex.wait_lock FD: 1 BD: 103 +.-.: fastopen_seqlock.seqcount FD: 1 BD: 3 +...: rds_sock_lock FD: 1 BD: 3 +...: clock-AF_RDS FD: 2 BD: 3 ....: &rs->rs_recv_lock ->&rs->rs_lock FD: 1 BD: 3 ....: rds_cong_monitor_lock FD: 1 BD: 10 ....: rds_cong_lock FD: 1 BD: 4 ....: &rs->rs_lock FD: 1 BD: 3 ....: &rs->rs_rdma_lock FD: 1 BD: 3 ....: &q->lock FD: 900 BD: 2 +.+.: (work_completion)(&nlk->work) ->&obj_hash[i].lock ->pool_lock#2 ->rlock-AF_NETLINK ->&dir->lock ->vmap_area_lock ->purge_vmap_area_lock ->&rq->__lock ->genl_mutex ->genl_mutex.wait_lock ->&p->pi_lock ->quarantine_lock FD: 4 BD: 3721 +.-.: icmp_global.lock 
->batched_entropy_u8.lock FD: 58 BD: 1 +.-.: (&p->forward_delay_timer) ->&br->lock FD: 130 BD: 73 +.+.: &data->nh_lock ->fs_reclaim ->pool_lock#2 FD: 21 BD: 1 +.+.: (wq_completion)phy15 ->(work_completion)(&local->reconfig_filter) FD: 139 BD: 4 +.+.: pfkey_mutex ->crypto_alg_sem ->(kmod_concurrent_max).lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&x->wait#17 ->&rq->__lock ->running_helpers_waitq.lock ->(crypto_chain).rwsem ->&x->wait#21 ->&base->lock ->(&timer.timer) ->remove_cache_srcu ->&n->list_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->&____s->seqcount ->&____s->seqcount#2 ->&cfs_rq->removed.lock FD: 1 BD: 10 +.-.: xfrm_state_gc_lock FD: 1 BD: 4 ....: rlock-AF_KEY FD: 1 BD: 3 +...: clock-AF_KEY FD: 1 BD: 3 ....: wlock-AF_KEY FD: 36 BD: 7 +.-.: (&x->rtimer) ->&x->lock FD: 11 BD: 101 +.--: &____s->seqcount#13 ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock FD: 12 BD: 100 +.-.: &(&bp->lock)->lock ->&____s->seqcount#13 FD: 42 BD: 76 +.-.: &q->lock#2 ->&obj_hash[i].lock ->pool_lock#2 ->&base->lock ->&c->lock ->&zone->lock ->&____s->seqcount ->(console_sem).lock FD: 1 BD: 1 +...: clock-AF_LLC FD: 245 BD: 3 +.+.: sk_lock-AF_LLC ->slock-AF_LLC ->fs_reclaim ->pool_lock#2 ->&dir->lock#2 ->&sap->sk_lock ->llc_sap_list_lock ->&rq->__lock ->&c->lock ->wlock-AF_LLC ->&obj_hash[i].lock ->&base->lock ->&ei->socket.wq.wait ->rcu_node_0 ->quarantine_lock ->&n->list_lock ->&meta->lock ->kfence_freelist_lock ->&mm->mmap_lock FD: 37 BD: 5 +.-.: slock-AF_LLC ->&sk->sk_lock.wq ->pool_lock#2 ->&c->lock ->wlock-AF_LLC ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 4 +.-.: &sap->sk_lock FD: 1 BD: 3 ....: (&llc->pf_cycle_timer.timer) FD: 38 BD: 3 +.-.: (&llc->ack_timer.timer) ->pool_lock#2 ->slock-AF_LLC ->&c->lock FD: 1 BD: 3 ....: (&llc->rej_sent_timer.timer) FD: 1 BD: 3 ....: (&llc->busy_state_timer.timer) FD: 1 BD: 3 ....: rlock-AF_LLC FD: 1 BD: 6 ..-.: wlock-AF_LLC FD: 1 BD: 3 ....: &list->lock#28 FD: 1 BD: 9 ....: &sem->waiters FD: 1 BD: 5 ....: tracepoint_srcu FD: 1 BD: 5 +.+.: tracepoints_mutex.wait_lock FD: 1 BD: 3 +.+.: isotp_notifier_lock FD: 1 BD: 93 +...: &token_hash[i].lock FD: 293 BD: 3 +.+.: sched_register_mutex ->tracepoints_mutex FD: 1 BD: 72 +.-.: wlock-AF_UNSPEC FD: 1 BD: 72 ....: elock-AF_UNSPEC FD: 1 BD: 77 +.+.: rcu_state.barrier_mutex.wait_lock FD: 130 BD: 73 +.+.: &block->lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&____s->seqcount#2 ->&____s->seqcount ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&rq->__lock ->&n->list_lock FD: 130 BD: 72 ++++: &block->cb_lock ->flow_indr_block_lock ->&tp->lock FD: 128 BD: 73 +.+.: flow_indr_block_lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&n->list_lock ->&rq->__lock FD: 131 BD: 1 .+.+: kn->active#58 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 132 BD: 72 +.+.: &chain->filter_chain_lock ->&block->lock ->&block->proto_destroy_lock FD: 128 BD: 72 +.+.: &tn->idrinfo->lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 14 BD: 72 +...: &p->tcfa_lock ->&(to_police(*a)->tcfp_lock) ->pool_lock#2 ->&c->lock FD: 5 BD: 73 +...: &(to_police(*a)->tcfp_lock) ->tk_core.seq.seqcount FD: 1 BD: 73 +.+.: &block->proto_destroy_lock FD: 218 BD: 73 +.+.: &xs->mutex ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&c->lock ->&n->list_lock ->umem_ida.xa_lock ->&rq->__lock ->&mm->mmap_lock ->&____s->seqcount#2 ->&sem->wait_lock ->&p->pi_lock ->&zone->lock ->rcu_node_0 
->&lock->wait_lock ->&obj_hash[i].lock ->&pool->xsk_tx_list_lock ->rcu_state.exp_mutex.wait_lock ->&cfs_rq->removed.lock ->purge_vmap_area_lock FD: 6 BD: 4 +...: &xs->map_list_lock ->stock_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3 +...: clock-AF_XDP FD: 1 BD: 75 +...: l2tp_ip6_lock FD: 1 BD: 1 ....: _rs.lock#3 FD: 1 BD: 114 ..-.: key#23 FD: 233 BD: 3 +.+.: sk_lock-AF_PPPOX ->slock-AF_PPPOX ->&pn->hash_lock ->clock-AF_PPPOX ->rlock-AF_PPPOX ->fs_reclaim ->pool_lock#2 ->&ps->sk_lock ->&c->lock ->&tunnel->hlist_lock ->&pn->l2tp_session_hlist_lock ->&obj_hash[i].lock ->&list->lock#37 ->chan_lock ->&pch->chan_sem ->&pch->upl ->&pn->all_channels_lock ->&pf->rwait ->&dir->lock ->&list->lock#33 ->&mm->mmap_lock FD: 1 BD: 4 +...: slock-AF_PPPOX FD: 1 BD: 4 +...: clock-AF_PPPOX FD: 1 BD: 4 ..-.: rlock-AF_PPPOX FD: 1 BD: 3 ....: rlock-AF_PHONET FD: 41 BD: 1 +.+.: sk_lock-AF_IEEE802154 ->slock-AF_IEEE802154 ->(console_sem).lock ->&rq->__lock FD: 1 BD: 2 +...: slock-AF_IEEE802154 FD: 1 BD: 140 ....: &rdev->wpan_phy.queue_lock FD: 1 BD: 140 ..-.: &list->lock#29 FD: 1 BD: 140 ....: &rdev->wpan_phy.sync_txq FD: 284 BD: 2 +.+.: ((ipv6_flowlabel_exclusive).work).work ->cpu_hotplug_lock FD: 73 BD: 1 .+.+: sb_writers#13 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#27 ->&wb->list_lock FD: 28 BD: 4 +.+.: resource_mutex ->&rq->__lock FD: 1 BD: 3 +...: clock-AF_PHONET FD: 1 BD: 3 +...: dgram_lock FD: 1 BD: 5 +.+.: &pnsocks.lock FD: 31 BD: 1 ..-.: net/ipv6/ip6_flowlabel.c:57 FD: 1 BD: 3 ++.-: raw_lock FD: 1 BD: 3 +...: clock-AF_IEEE802154 FD: 1 BD: 3 ....: rlock-AF_IEEE802154 FD: 1 BD: 75 +.+.: tcpv6_prot_mutex FD: 1 BD: 75 +...: device_spinlock FD: 1 BD: 4 +...: unix_dgram_prot_lock FD: 27 BD: 78 +...: &htab->buckets[i].lock ->stock_lock ->pool_lock#2 ->&psock->link_lock ->&obj_hash[i].lock ->krc.lock ->clock-AF_UNIX ->&psock->ingress_lock ->&c->lock ->clock-AF_INET FD: 4 BD: 80 +...: &psock->link_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 79 +...: &psock->ingress_lock FD: 1 BD: 5 +.+.: (work_completion)(&(&psock->work)->work) FD: 53 BD: 2 +.+.: (work_completion)(&(&psock->rwork)->work) ->&obj_hash[i].lock ->(work_completion)(&(&psock->work)->work) ->&list->lock#30 ->rlock-AF_UNIX ->pool_lock#2 ->&dir->lock ->stock_lock FD: 1 BD: 3 ....: &list->lock#30 FD: 1 BD: 3 +.+.: (work_completion)(&(&sw_ctx_tx->tx_work.work)->work) FD: 1 BD: 75 +...: &sw_ctx_tx->encrypt_compl_lock FD: 1 BD: 3 +.+.: (work_completion)(&strp->work) FD: 13 BD: 85 ..-.: &local->ack_status_lock ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount#2 ->&____s->seqcount ->&n->list_lock FD: 1 BD: 4 +...: smc_v4_hashinfo.lock FD: 481 BD: 4 +.+.: &smc->clcsock_release_lock ->&mm->mmap_lock ->&rq->__lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->stock_lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&net->smc.mutex_fback_rsn ->k-clock-AF_INET ->&base->lock ->(&timer.timer) ->nf_sockopt_mutex ->__ip_vs_mutex ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->fs_reclaim FD: 28 BD: 3 +.+.: (work_completion)(&smc->connect_work) ->&rq->__lock FD: 486 BD: 3 +.+.: sk_lock-AF_SMC ->&rq->__lock ->slock-AF_SMC ->smc_v4_hashinfo.lock ->clock-AF_SMC ->&smc->clcsock_release_lock ->k-sk_lock-AF_INET ->k-slock-AF_INET ->k-clock-AF_INET ->smc_v6_hashinfo.lock FD: 1 BD: 4 +...: slock-AF_SMC FD: 1 BD: 4 +...: clock-AF_SMC FD: 1 BD: 75 ....: &____s->seqcount#14 FD: 1 BD: 5 ++.-: &policy->lock FD: 1 BD: 4 ....: &list->lock#31 FD: 18 BD: 109 +.-.: 
&dccp_hashinfo.bhash[i].lock ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->clock-AF_INET6 ->&dccp_hashinfo.bhash2[i].lock ->&obj_hash[i].lock FD: 17 BD: 110 +.-.: &dccp_hashinfo.bhash2[i].lock ->stock_lock ->&____s->seqcount#2 ->&____s->seqcount ->pool_lock#2 ->&c->lock ->clock-AF_INET6 ->batched_entropy_u8.lock ->&hashinfo->ehash_locks[i] ->&obj_hash[i].lock FD: 101 BD: 1 +.-.: &dreq->dreq_lock ->pool_lock#2 ->&dir->lock ->slock-AF_INET6 FD: 1 BD: 75 ....: wlock-AF_INET6 FD: 218 BD: 3 +.+.: sk_lock-AF_X25 ->&rq->__lock ->slock-AF_X25 ->wlock-AF_X25 ->&list->lock#32 ->&obj_hash[i].lock ->x25_list_lock ->rlock-AF_X25 ->&mm->mmap_lock FD: 1 BD: 4 +...: slock-AF_X25 FD: 1 BD: 4 ....: wlock-AF_X25 FD: 1 BD: 4 ....: &list->lock#32 FD: 1 BD: 4 ....: rlock-AF_X25 FD: 2 BD: 2 +.+.: &(&net->xfrm.policy_hthresh.lock)->lock ->&____s->seqcount#15 FD: 1 BD: 6 +.+.: &____s->seqcount#15 FD: 63 BD: 2 +.+.: (work_completion)(&net->xfrm.policy_hthresh.work) ->hash_resize_mutex FD: 62 BD: 3 +.+.: hash_resize_mutex ->&____s->seqcount#15 ->&net->xfrm.xfrm_policy_lock FD: 219 BD: 1 +.+.: sk_lock-AF_RDS ->slock-AF_RDS ->&mm->mmap_lock ->rds_trans_sem FD: 1 BD: 2 +...: slock-AF_RDS FD: 134 BD: 147 +.+.: &sb->s_type->i_mutex_key#21 ->tk_core.seq.seqcount ->fs_reclaim ->&c->lock ->pool_lock#2 ->&resv_map->lock ->hugetlb_lock FD: 73 BD: 1 .+.+: sb_writers#14 ->mount_lock ->tk_core.seq.seqcount ->&sb->s_type->i_lock_key#16 ->&wb->list_lock ->&rq->__lock FD: 34 BD: 149 +.+.: &hugetlbfs_i_mmap_rwsem_key ->&obj_hash[i].lock ->pool_lock#2 ->ptlock_ptr(page) ->&____s->seqcount ->&rq->__lock FD: 131 BD: 148 ++++: &vma_lock->rw_sema ->&hugetlbfs_i_mmap_rwsem_key ->fs_reclaim ->&____s->seqcount ->pool_lock#2 ->stock_lock ->&mm->page_table_lock ->&resv_map->lock ->hugetlb_lock ->&xa->xa_lock#8 ->&sb->s_type->i_lock_key#16 ->ptlock_ptr(page) ->&rq->__lock ->&c->lock FD: 4 BD: 150 +.+.: &resv_map->lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 +.+.: &net->ipv4.ra_mutex FD: 131 BD: 1 +.+.: &audit_cmd_mutex.lock ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&list->lock ->kauditd_wait.lock ->&rq->__lock ->&c->lock FD: 872 BD: 1 +.+.: ppp_mutex ->&mm->mmap_lock ->fs_reclaim ->stock_lock ->&c->lock ->pool_lock#2 ->stack_depot_init_mutex ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock ->&n->list_lock ->&rq->__lock ->ppp_mutex.wait_lock ->&ppp->wlock ->&____s->seqcount ->&obj_hash[i].lock ->krc.lock ->&dir->lock#2 ->&____s->seqcount#2 ->free_vmap_area_lock ->vmap_area_lock ->pcpu_alloc_mutex ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex ->init_mm.page_table_lock ->rcu_node_0 ->batched_entropy_u8.lock ->kfence_freelist_lock ->pcpu_alloc_mutex.wait_lock ->remove_cache_srcu ->&ppp->rlock ->(console_sem).lock ->&zone->lock ->&cfs_rq->removed.lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->quarantine_lock ->&meta->lock FD: 129 BD: 72 +.+.: &pn->all_ppp_mutex ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock ->&n->list_lock ->&cfs_rq->removed.lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount#2 ->&____s->seqcount ->remove_cache_srcu FD: 1 BD: 5 +.+.: mgmt_chan_list_lock.wait_lock FD: 31 BD: 73 +...: &ppp->rlock ->&obj_hash[i].lock FD: 34 BD: 72 +...: &ppp->wlock ->&ppp->rlock ->&list->lock#33 ->&pf->rwait ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 72 +...: &dev_addr_list_lock_key#4 FD: 29 BD: 74 ....: &pf->rwait ->&p->pi_lock FD: 1 BD: 74 ....: &list->lock#33 FD: 1 BD: 5 
+.+.: rfcomm_sk_list.lock FD: 216 BD: 3 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM ->slock-AF_BLUETOOTH-BTPROTO_RFCOMM ->rfcomm_sk_list.lock ->&mm->mmap_lock ->rfcomm_ioctl_mutex FD: 1 BD: 4 +...: slock-AF_BLUETOOTH-BTPROTO_RFCOMM FD: 1 BD: 3 +.+.: &d->lock FD: 1 BD: 3 ....: &list->lock#34 FD: 1 BD: 3 +.+.: &knet->mutex FD: 1 BD: 77 +...: &mux->lock FD: 2 BD: 125 +...: &mux->rx_lock ->rlock-AF_KCM FD: 449 BD: 3 +.+.: sk_lock-AF_KCM ->slock-AF_KCM ->fs_reclaim ->pool_lock#2 ->&____s->seqcount ->&mm->mmap_lock ->&mux->lock ->&rq->__lock ->clock-AF_KCM ->&obj_hash[i].lock ->&c->lock ->&sem->wait_lock ->&p->pi_lock ->&n->list_lock ->sk_lock-AF_INET ->slock-AF_INET FD: 30 BD: 4 +...: slock-AF_KCM ->&sk->sk_lock.wq FD: 1 BD: 4 +...: clock-AF_KCM FD: 1 BD: 3 +.+.: (work_completion)(&kcm->tx_work) FD: 1 BD: 126 ....: rlock-AF_KCM FD: 1 BD: 79 +...: &ipvs->dest_trash_lock FD: 41 BD: 77 +.+.: flowtable_lock ->&rq->__lock ->&ht->lock ->&(&flowtable->gc_work)->timer ->&obj_hash[i].lock ->&base->lock ->(work_completion)(&(&flowtable->gc_work)->work) ->(wq_completion)nf_ft_offload_add ->&wq->mutex ->(wq_completion)nf_ft_offload_del ->(wq_completion)nf_ft_offload_stats ->&x->wait#10 FD: 1 BD: 3 +...: clock-AF_RXRPC FD: 230 BD: 3 +.+.: sk_lock-AF_PHONET ->slock-AF_PHONET ->&pnsocks.lock ->resource_mutex ->&obj_hash[i].lock ->&rq->__lock ->&mm->mmap_lock ->fs_reclaim ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->stock_lock ->&c->lock ->pcpu_alloc_mutex ->pool_lock#2 ->&f->f_lock ->port_mutex#2 ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4 +...: slock-AF_PHONET FD: 1 BD: 3 ....: &list->lock#35 FD: 128 BD: 72 +.+.: &tn->idrinfo->lock#2 ->fs_reclaim ->pool_lock#2 FD: 139 BD: 72 +.+.: zones_mutex ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock ->batched_entropy_u32.lock ->&base->lock ->&rq->__lock ->flowtable_lock FD: 218 BD: 1 +.+.: &tfile->napi_mutex ->&rq->__lock ->&____s->seqcount ->pool_lock#2 ->&mm->mmap_lock ->&obj_hash[i].lock ->pcpu_lock FD: 31 BD: 78 ..-.: &(&flowtable->gc_work)->timer FD: 30 BD: 78 +.+.: (work_completion)(&(&flowtable->gc_work)->work) ->&ht->lock ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 78 +.+.: (wq_completion)nf_ft_offload_add FD: 1 BD: 78 +.+.: (wq_completion)nf_ft_offload_del FD: 1 BD: 78 +.+.: (wq_completion)nf_ft_offload_stats FD: 128 BD: 3 +.+.: callchain_mutex ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->&rq->__lock ->&obj_hash[i].lock FD: 1 BD: 2 +.+.: vlan_ioctl_mutex.wait_lock FD: 1 BD: 12 ++..: ip_set_ref_lock FD: 1 BD: 6 +.+.: (work_completion)(&(&gc->dwork)->work) FD: 144 BD: 6 +.+.: (work_completion)(&(&local->roc_work)->work) ->&local->mtx ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 33 BD: 1 ..-.: &(&local->roc_work)->timer FD: 31 BD: 1 ..-.: &(&hctx->run_work)->timer FD: 1 BD: 3 ....: &list->lock#36 FD: 1 BD: 3 +...: clock-AF_NFC FD: 1 BD: 3 ....: rlock-AF_NFC FD: 7 BD: 3 +...: &m->lock ->&xs->map_list_lock FD: 169 BD: 3 +.+.: &journal->j_barrier ->&journal->j_state_lock ->&rq->__lock ->&journal->j_list_lock ->&journal->j_checkpoint_mutex ->jbd2_handle ->&journal->j_wait_commit ->&journal->j_wait_done_commit FD: 1 BD: 72 ....: (&q->adapt_timer) FD: 20 BD: 80 +.+.: &sta->ampdu_mlme.mtx ->&sta->lock FD: 1 BD: 80 +.+.: (work_completion)(&sta->ampdu_mlme.work) FD: 28 BD: 80 +.+.: (work_completion)(&sta->drv_deliver_wk) ->&rq->__lock FD: 133 BD: 1 ++++: kn->active#59 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->(wq_completion)cpuset_migrate_mm ->&wq->mutex FD: 10 BD: 12 +...: 
&pn->l2tp_tunnel_idr_lock ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->&n->list_lock FD: 132 BD: 4 +.+.: &ps->sk_lock ->&tunnel->hlist_lock ->fs_reclaim ->pool_lock#2 ->&dir->lock ->&pn->all_channels_lock FD: 2 BD: 11 +...: &tunnel->hlist_lock ->&pn->l2tp_session_hlist_lock FD: 1 BD: 12 +...: &pn->l2tp_session_hlist_lock FD: 1 BD: 4 ....: &list->lock#37 FD: 13 BD: 6 +.+.: (work_completion)(&tunnel->del_work) ->&tunnel->hlist_lock ->&pn->l2tp_tunnel_idr_lock FD: 1 BD: 3 ..-.: wlock-AF_PPPOX FD: 1 BD: 8 +...: l2tp_ip_lock FD: 140 BD: 2 +.+.: ipcomp_resource_mutex ->pcpu_alloc_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->crypto_alg_sem ->&rq->__lock FD: 131 BD: 1 .+.+: kn->active#60 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 35 BD: 9 +.-.: &x->lock ->&net->xfrm.xfrm_state_lock ->xfrm_state_gc_lock FD: 2 BD: 1 ....: &loc_l->lock ->&l->lock FD: 1 BD: 2 ....: &l->lock FD: 1 BD: 76 +.+.: stack_depot_init_mutex.wait_lock FD: 43 BD: 1 +.-.: (&q->timer) ->&obj_hash[i].lock ->pool_lock#2 ->&zone->lock FD: 1 BD: 1 +.+.: &mq_lock FD: 130 BD: 2 +.+.: free_ipc_work ->&obj_hash[i].lock ->&pool->lock ->&rq->__lock ->mount_lock ->&fsnotify_mark_srcu ->&type->s_umount_key#47 ->unnamed_dev_ida.xa_lock ->list_lrus_mutex ->&xa->xa_lock#4 ->pool_lock#2 ->sb_lock ->mnt_id_ida.xa_lock ->&ids->rwsem ->(work_completion)(&ht->run_work) ->&ht->mutex ->percpu_counters_lock ->pcpu_lock ->sysctl_lock ->proc_inum_ida.xa_lock ->stock_lock ->&rnp->exp_wq[1] ->&rnp->exp_lock ->rcu_state.exp_mutex ->&rnp->exp_wq[0] ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->quarantine_lock ->&cfs_rq->removed.lock FD: 128 BD: 3 +.+.: &type->s_umount_key#47 ->shrinker_rwsem ->rename_lock.seqcount ->&dentry->d_lock ->&sb->s_type->i_lock_key#20 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->sb_lock FD: 1 BD: 3 +.+.: &ids->rwsem FD: 131 BD: 1 .+.+: kn->active#61 ->&rq->__lock ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 32 BD: 79 +.-.: (&ifibss->timer) ->&rdev->wiphy_work_lock FD: 871 BD: 2 +.+.: (work_completion)(&(&rdev->dfs_update_channels_wk)->work) ->rtnl_mutex ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 75 ....: (&local->dynamic_ps_timer) FD: 1 BD: 75 +.+.: (work_completion)(&local->dynamic_ps_enable_work) FD: 1 BD: 75 +.+.: (work_completion)(&sdata->recalc_smps) FD: 178 BD: 77 +.+.: (work_completion)(&link->csa_finalize_work) ->&rq->__lock ->&wdev->mtx FD: 1 BD: 75 +.+.: (work_completion)(&link->color_change_finalize_work) FD: 1 BD: 75 +.+.: (work_completion)(&(&link->dfs_cac_timer_work)->work) FD: 1 BD: 111 ...-: &f->f_owner.lock FD: 31 BD: 2 +.+.: sk_lock-AF_ALG/1 ->slock-AF_ALG FD: 29 BD: 159 ....: &bdi->wb_waitq ->&p->pi_lock FD: 1 BD: 5 ++++: ax25_uid_lock FD: 1 BD: 7 ..-.: &list->lock#38 FD: 44 BD: 1 +.-.: net/netrom/nr_loopback.c:18 ->&list->lock#38 ->nr_list_lock ->pool_lock#2 ->&____s->seqcount ->&obj_hash[i].lock ->&base->lock ->slock-AF_NETROM ->&c->lock ->&dir->lock FD: 1 BD: 5007 ...-: init_task.mems_allowed_seq.seqcount FD: 910 BD: 1 .+.+: &rdma_nl_types[idx].sem ->link_ops_rwsem ->devices_rwsem ->fs_reclaim ->pool_lock#2 ->&rq->__lock ->&device->client_data_rwsem ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&c->lock ->&n->list_lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&____s->seqcount ->&rxe->usdev_lock ->&pdata->netdev_lock ->(console_sem).lock ->nlk_cb_mutex-RDMA FD: 1 BD: 1 
....: _rs.lock#4 FD: 9 BD: 19 +.+.: &pdata->netdev_lock ->pool_lock#2 ->&dir->lock#2 ->ndev_hash_lock FD: 155 BD: 1 +.+.: sock_diag_mutex ->sock_diag_table_mutex ->fs_reclaim ->pool_lock#2 ->rlock-AF_NETLINK ->&c->lock ->&n->list_lock ->&rq->__lock FD: 1 BD: 20 ....: ndev_hash_lock FD: 2 BD: 12 +.+.: devices.xa_lock ->pool_lock#2 FD: 873 BD: 18 +.+.: &rxe->usdev_lock ->&pdata->netdev_lock ->rtnl_mutex ->&rq->__lock ->rtnl_mutex.wait_lock ->&p->pi_lock FD: 130 BD: 3782 +.+.: &table->lock#4 ->&rq->__lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock ->&table->rwlock ->&device->event_handler_rwsem FD: 1 BD: 3783 ....: &table->rwlock FD: 28 BD: 3785 ++++: &device->event_handler_rwsem ->&rq->__lock FD: 1 BD: 5 ....: &device->cache_lock FD: 1 BD: 3 +.+.: rdmacg_mutex FD: 30 BD: 14 +.+.: subsys mutex#84 ->&k->k_lock ->&rq->__lock FD: 875 BD: 1 +.+.: (wq_completion)infiniband ->(work_completion)(&work->work)#2 FD: 874 BD: 2 +.+.: (work_completion)(&work->work)#2 ->fs_reclaim ->pool_lock#2 ->&rxe->usdev_lock ->&device->cache_lock ->&obj_hash[i].lock ->&device->event_handler_rwsem FD: 317 BD: 13 ++++: &device->client_data_rwsem ->&xa->xa_lock#16 ->&rq->__lock ->fs_reclaim ->&c->lock ->&n->list_lock ->pool_lock#2 ->&xa->xa_lock#17 ->&xa->xa_lock#18 ->crngs.lock ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->init_mm.page_table_lock ->&obj_hash[i].lock ->&cq->cq_lock ->stock_lock ->mmu_notifier_invalidate_range_start ->&sb->s_type->i_lock_key#8 ->&dir->lock ->&qp->state_lock ->cpu_hotplug_lock ->kthread_create_lock ->&p->pi_lock ->&x->wait ->wq_pool_mutex ->ib_mad_port_list_lock ->&mad_queue->lock ->&qp->rq.producer_lock ->&____s->seqcount#2 ->rcu_node_0 ->ib_mad_clients.xa_lock ->&port_priv->reg_lock ->ib_agent_port_list_lock ->lock ->&root->kernfs_rwsem ->&cm.device_lock ->lock#7 ->umad_ida.xa_lock ->&x->wait#9 ->chrdevs_lock ->&k->list_lock ->gdp_mutex ->bus_type_sem ->sysfs_symlink_target_lock ->&dev->power.lock ->dpm_list_mtx ->req_lock ->&x->wait#11 ->uevent_sock_mutex ->subsys mutex#85 ->pcpu_alloc_mutex ->uverbs_ida.xa_lock ->subsys mutex#86 ->subsys mutex#87 ->(console_sem).lock ->rds_ib_devices_lock ->ib_nodev_conns_lock ->smc_ib_devices.mutex ->&device->event_handler_rwsem ->&pnettable->lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) ->&rcu_state.expedited_wq ->&cfs_rq->removed.lock FD: 9 BD: 16 +.+.: &xa->xa_lock#16 ->pool_lock#2 ->&obj_hash[i].lock ->&c->lock FD: 8 BD: 14 +.+.: &xa->xa_lock#17 ->pool_lock#2 ->&c->lock ->&n->list_lock FD: 7 BD: 75 +.+.: &xa->xa_lock#18 ->pool_lock#2 ->&c->lock ->batched_entropy_u8.lock ->kfence_freelist_lock FD: 1 BD: 14 ....: &cq->cq_lock FD: 1 BD: 14 ....: &qp->state_lock FD: 1 BD: 14 ....: ib_mad_port_list_lock FD: 1 BD: 14 ....: &mad_queue->lock FD: 1 BD: 14 ....: &qp->rq.producer_lock FD: 2 BD: 14 +.+.: ib_mad_clients.xa_lock ->pool_lock#2 FD: 2 BD: 14 ....: &port_priv->reg_lock ->pool_lock#2 FD: 1 BD: 14 ....: ib_agent_port_list_lock FD: 1 BD: 14 ....: &cm.device_lock FD: 1 BD: 75 +.+.: &id_priv->qp_mutex FD: 2 BD: 75 +.+.: &xa->xa_lock#19 ->pool_lock#2 FD: 2 BD: 75 ....: &cm_id_priv->lock ->&cm.lock FD: 1 BD: 76 ....: &cm.lock FD: 1 BD: 14 ....: umad_ida.xa_lock FD: 3 BD: 14 +.+.: subsys mutex#85 ->&k->k_lock FD: 1 BD: 14 ....: uverbs_ida.xa_lock FD: 3 BD: 14 +.+.: subsys mutex#86 ->&k->k_lock FD: 30 BD: 14 +.+.: subsys mutex#87 ->&rq->__lock ->&k->k_lock FD: 29 BD: 16 ++++: rds_ib_devices_lock ->&rq->__lock ->&pool->flush_lock FD: 1 BD: 14 +.+.: ib_nodev_conns_lock FD: 1 BD: 1 ....: _rs.lock#5 FD: 30 BD: 4 +.+.: 
&pipe->mutex#2/2 ->&lock->wait_lock ->&p->pi_lock ->&rq->__lock FD: 1 BD: 1 ....: _rs.lock#6 FD: 875 BD: 2 +.+.: (work_completion)(&smcibdev->port_event_work) ->&rxe->usdev_lock ->&table->rwlock ->smc_lgr_list.lock FD: 1 BD: 3 +...: smc_lgr_list.lock FD: 876 BD: 13 +.+.: &device->compat_devs_mutex ->&rq->__lock ->fs_reclaim ->&xa->xa_lock#16 ->&c->lock ->pool_lock#2 ->&x->wait#9 ->&obj_hash[i].lock ->&k->list_lock ->gdp_mutex ->lock ->&root->kernfs_rwsem ->bus_type_sem ->sysfs_symlink_target_lock ->&____s->seqcount#2 ->&____s->seqcount ->&dev->power.lock ->dpm_list_mtx ->uevent_sock_mutex ->subsys mutex#84 ->&rxe->usdev_lock ->&zone->lock ->batched_entropy_u8.lock ->kfence_freelist_lock ->&n->list_lock ->rcu_node_0 ->remove_cache_srcu ->&cfs_rq->removed.lock ->&sem->wait_lock ->&p->pi_lock ->&rcu_state.expedited_wq ->&lock->wait_lock ->uevent_sock_mutex.wait_lock ->quarantine_lock ->pgd_lock ->key ->pcpu_lock ->percpu_counters_lock ->stock_lock FD: 1 BD: 148 ....: key#24 FD: 1 BD: 1 ....: _rs.lock#7 FD: 5 BD: 77 +...: &stab->lock ->&psock->link_lock FD: 1 BD: 72 ....: (&brmctx->ip4_mc_router_timer) FD: 1 BD: 72 ....: (&brmctx->ip4_other_query.timer) FD: 1 BD: 72 ....: (&brmctx->ip6_mc_router_timer) FD: 1 BD: 72 ....: (&brmctx->ip6_other_query.timer) FD: 46 BD: 73 +.+.: (work_completion)(&br->mcast_gc_work) ->&rq->__lock ->&br->multicast_lock ->(&p->rexmit_timer) ->&obj_hash[i].lock ->&base->lock ->(&p->timer) ->pool_lock#2 ->krc.lock ->(&mp->timer) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 135 BD: 2 +.+.: (work_completion)(&crct10dif_rehash_work) ->crc_t10dif_mutex ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 160 ....: crypto_alg_sem.wait_lock FD: 29 BD: 77 +.-.: (&tw->tw_timer) ->&hashinfo->ehash_locks[i] ->&tcp_hashinfo.bhash[i].lock ->&obj_hash[i].lock ->&dccp_hashinfo.bhash[i].lock ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 193 BD: 77 +.+.: k-sk_lock-AF_INET/1 ->k-slock-AF_INET ->pool_lock#2 ->&dir->lock ->fs_reclaim ->k-clock-AF_INET ->&c->lock ->&hashinfo->ehash_locks[i] ->remove_cache_srcu ->tk_core.seq.seqcount ->&obj_hash[i].lock ->&base->lock ->&rq->__lock ->slock-AF_INET FD: 94 BD: 1 +.-.: k-slock-AF_INET/1 ->&obj_hash[i].lock ->pool_lock#2 ->tk_core.seq.seqcount ->slock-AF_INET ->&base->lock ->k-clock-AF_INET ->&c->lock ->&tcp_hashinfo.bhash[i].lock ->&hashinfo->ehash_locks[i] FD: 2 BD: 96 +.-.: fastopen_seqlock ->fastopen_seqlock.seqcount FD: 1 BD: 1 ....: _rs.lock#8 FD: 8 BD: 72 +.+.: mrt_lock ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 4962 ....: cid_lock FD: 1 BD: 75 +.+.: tcpv4_prot_mutex FD: 132 BD: 147 +.+.: &hugetlb_fault_mutex_table[i] ->&rq->__lock ->&vma_lock->rw_sema ->&sb->s_type->i_lock_key#16 FD: 1 BD: 74 +...: &net->can.rcvlists_lock FD: 2 BD: 83 ....: &new->fa_lock ->&f->f_owner.lock FD: 1 BD: 1 ....: &so->wait FD: 1 BD: 3 +...: ip6_ra_lock FD: 131 BD: 11 +.+.: nlk_cb_mutex-NETFILTER ->fs_reclaim ->pool_lock#2 ->&cnet->ecache.dying_lock ->&obj_hash[i].lock ->rlock-AF_NETLINK ->&c->lock ->&rq->__lock ->&n->list_lock ->ip_set_ref_lock FD: 1 BD: 12 +...: &cnet->ecache.dying_lock FD: 1 BD: 4 +.+.: chan_lock FD: 1 BD: 1 +...: &list->lock#39 FD: 14 BD: 1 +.-.: (&map->gc) ->&set->lock ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 2 +.-.: &set->lock FD: 131 BD: 72 +.+.: team->team_lock_key#7 ->&rq->__lock ->mode_list_lock ->(console_sem).lock ->fs_reclaim ->pool_lock#2 ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&n->list_lock FD: 2 BD: 4 +...: noop_qdisc.busylock ->noop_qdisc.q.lock FD: 131 BD: 72 +.+.: team->team_lock_key#8 
->mode_list_lock ->(console_sem).lock ->&rq->__lock ->fs_reclaim ->&c->lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock ->&n->list_lock FD: 131 BD: 72 +.+.: team->team_lock_key#9 ->mode_list_lock ->(console_sem).lock ->fs_reclaim ->&c->lock ->&n->list_lock ->&rq->__lock ->&obj_hash[i].lock ->nl_table_lock ->nl_table_wait.lock FD: 131 BD: 1 +.+.: &xn->hash_lock ->&rq->__lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&est->lock ->(&est->timer) ->&obj_hash[i].lock ->&base->lock ->krc.lock FD: 13 BD: 2 +...: &est->lock ->&obj_hash[i].lock ->&base->lock FD: 14 BD: 2 +.-.: (&est->timer) ->&est->seq ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 3 +.+.: raw_notifier_lock FD: 1 BD: 78 +...: &qdisc_xmit_lock_key FD: 226 BD: 3 +.+.: sk_lock-AF_AX25 ->slock-AF_AX25 ->&mm->mmap_lock ->clock-AF_AX25 ->&rq->__lock ->ax25_list_lock ->&obj_hash[i].lock ->&list->lock#40 ->rlock-AF_AX25 ->wlock-AF_AX25 ->ax25_uid_lock ->&ei->socket.wq.wait ->ax25_dev_lock FD: 28 BD: 5 +.+.: &device->unregistration_lock ->&rq->__lock FD: 291 BD: 72 +.+.: bpf_dispatcher_xdp.mutex ->pack_mutex ->fs_reclaim ->&c->lock ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&rq->__lock ->&____s->seqcount ->init_mm.page_table_lock ->bpf_lock ->text_mutex ->cpu_hotplug_lock ->&obj_hash[i].lock ->&rnp->exp_lock ->rcu_state.exp_mutex ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 1 BD: 4 +...: slock-AF_AX25 FD: 1 BD: 4 +...: clock-AF_AX25 FD: 1 BD: 4 +...: ax25_list_lock FD: 1 BD: 4 ....: &list->lock#40 FD: 1 BD: 4 ....: rlock-AF_AX25 FD: 1 BD: 4 ....: wlock-AF_AX25 FD: 1 BD: 1 ....: _rs.lock#9 FD: 1 BD: 5 +.+.: &net->smc.mutex_fback_rsn FD: 1 BD: 1 ....: _rs.lock#10 FD: 1 BD: 2 +.+.: ppp_mutex.wait_lock FD: 1 BD: 5 +...: &pn->all_channels_lock FD: 29 BD: 4 +.+.: &pch->chan_sem ->&rq->__lock ->&pch->downl FD: 1 BD: 5 +...: &pch->downl FD: 1 BD: 4 +...: &pch->upl FD: 489 BD: 1 +.+.: sk_lock-AF_RXRPC ->slock-AF_RXRPC ->&rxnet->local_mutex ->&local->services_lock ->fs_reclaim ->pool_lock#2 ->&rx->call_lock ->(rxrpc_call_limiter).lock ->&obj_hash[i].lock ->tk_core.seq.seqcount ->&call->waitq ->&call->user_mutex ->&rx->recvmsg_lock ->&rq->__lock FD: 1 BD: 3 +...: slock-AF_RXRPC FD: 1 BD: 1 +.+.: dev_map_lock FD: 134 BD: 3 +.+.: sk_lock-AF_QIPCRTR ->slock-AF_QIPCRTR ->fs_reclaim ->qrtr_ports.xa_lock ->clock-AF_QIPCRTR ->pool_lock#2 ->qrtr_node_lock ->rlock-AF_QIPCRTR ->&rq->__lock ->&obj_hash[i].lock FD: 8 BD: 72 +.+.: mrt_lock#2 ->pool_lock#2 ->&dir->lock#2 FD: 1 BD: 1 +...: &list->lock#41 FD: 1 BD: 4 +...: slock-AF_QIPCRTR FD: 1 BD: 4 +...: clock-AF_QIPCRTR FD: 1 BD: 7 ....: rlock-AF_QIPCRTR FD: 135 BD: 1 +.+.: (wq_completion)qrtr_ns_handler ->(work_completion)(&qrtr_ns.work) FD: 134 BD: 2 +.+.: (work_completion)(&qrtr_ns.work) ->fs_reclaim ->pool_lock#2 ->k-sk_lock-AF_QIPCRTR ->k-slock-AF_QIPCRTR ->&c->lock ->nodes.xa_lock ->&obj_hash[i].lock FD: 2 BD: 3 +.+.: nodes.xa_lock ->pool_lock#2 FD: 1 BD: 75 +...: ip6_sk_fl_lock FD: 1 BD: 4 +.+.: oom_adj_mutex.wait_lock FD: 29 BD: 3762 ....: &tfile->socket.wq.wait ->&p->pi_lock FD: 1 BD: 3 +...: base_sockets.lock FD: 1 BD: 4 +...: clock-AF_ISDN FD: 14 BD: 1 +.-.: net/ipv6/ip6_flowlabel.c:47 ->ip6_fl_lock FD: 1 BD: 78 +...: _xmit_LOOPBACK#2 FD: 1 BD: 72 +.+.: &r->consumer_lock#3 FD: 1 BD: 72 +...: &r->consumer_lock#4 FD: 1 BD: 6 +...: key#25 FD: 57 BD: 100 +...: reuseport_lock ->pool_lock#2 ->reuseport_ida.xa_lock ->clock-AF_INET6 ->&obj_hash[i].lock ->&c->lock FD: 1 BD: 101 ..-.: reuseport_ida.xa_lock FD: 2 BD: 145 +.+.: (work_completion)(flush) ->&list->lock#5 FD: 224 BD: 72 +.+.: 
sk_lock-AF_UNSPEC ->&rq->__lock ->slock-AF_UNSPEC ->fs_reclaim ->pool_lock#2 ->free_vmap_area_lock ->vmap_area_lock ->&____s->seqcount ->stock_lock ->&c->lock ->pcpu_alloc_mutex ->&mm->mmap_lock ->&obj_hash[i].lock ->pack_mutex ->batched_entropy_u32.lock ->text_mutex ->&fp->aux->used_maps_mutex FD: 1 BD: 73 +...: slock-AF_UNSPEC FD: 1 BD: 1 ....: _rs.lock#11 FD: 1 BD: 151 +.+.: jump_label_mutex.wait_lock FD: 35 BD: 84 +...: &sub->lock ->&srv->idr_lock ->pool_lock#2 ->&con->outqueue_lock ->&c->lock FD: 40 BD: 1 +.+.: (wq_completion)tipc_send ->(work_completion)(&con->swork) FD: 39 BD: 4 +.+.: (work_completion)(&con->swork) ->&con->outqueue_lock ->&c->lock ->pool_lock#2 ->&list->lock#42 ->&obj_hash[i].lock ->&list->lock#23 ->&rq->__lock ->&____s->seqcount#2 ->&____s->seqcount ->&srv->idr_lock ->tk_core.seq.seqcount ->&list->lock#5 FD: 1 BD: 10 +...: &list->lock#42 FD: 19 BD: 3716 +...: _xmit_ETHER/2 ->&obj_hash[i].lock ->krc.lock ->pool_lock#2 FD: 135 BD: 3 +.+.: sk_lock-AF_NFC ->slock-AF_NFC ->&k->list_lock ->&k->k_lock ->llcp_devices_lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->&local->sdp_lock ->&local->sockets.lock FD: 1 BD: 4 +...: slock-AF_NFC FD: 1 BD: 72 +...: mfc_unres_lock FD: 1 BD: 72 +...: mfc_unres_lock#2 FD: 1 BD: 72 +...: &bridge_netdev_addr_lock_key/2 FD: 1 BD: 72 +...: _xmit_NETROM#2 FD: 29 BD: 4490 ....: &x->wait#27 ->&p->pi_lock FD: 1 BD: 72 +...: _xmit_PHONET_PIPE FD: 33 BD: 1 ..-.: &(&conn->disc_work)->timer FD: 1 BD: 1 ....: _rs.lock#12 FD: 40 BD: 1 +.+.: (wq_completion)tipc_send#2 ->(work_completion)(&con->swork) FD: 1 BD: 73 +...: &tipc_net(net)->bclock FD: 1 BD: 6 +.+.: nf_sockopt_mutex.wait_lock FD: 1 BD: 75 ....: umem_ida.xa_lock FD: 1 BD: 6 +...: &pernet->lock FD: 46 BD: 3716 +...: &dev_addr_list_lock_key#3/2 ->&macvlan_netdev_addr_lock_key/1 ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock FD: 152 BD: 1 +.+.: (wq_completion)bond5 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 78 +...: &qdisc_xmit_lock_key#2 FD: 28 BD: 72 +.+.: (work_completion)(&port->wq) ->&rq->__lock FD: 39 BD: 2 +.+.: (work_completion)(&umem->work) ->umem_ida.xa_lock ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->&lruvec->lru_lock ->rcu_node_0 ->&rq->__lock ->&cfs_rq->removed.lock FD: 152 BD: 1 +.+.: (wq_completion)bond6 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond8 ->(work_completion)(&(&slave->notify_work)->work) FD: 153 BD: 1 +.+.: (wq_completion)bond7 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&barr->work) FD: 1 BD: 12 ....: devices_rwsem.wait_lock FD: 138 BD: 2 +.+.: &call->user_mutex ->&rx->call_lock ->&rxnet->call_lock ->slock-AF_RXRPC ->fs_reclaim ->pool_lock#2 ->&rq->__lock ->tk_core.seq.seqcount ->&rxnet->peer_hash_lock ->&local->client_bundles_lock ->&local->client_call_lock ->&p->pi_lock ->&call->waitq ->&call->tx_lock ->&local->lock ->&list->lock#21 ->&c->lock FD: 1 BD: 3 +.+.: &local->client_bundles_lock FD: 1 BD: 3 +.+.: &local->client_call_lock FD: 1 BD: 3 +.+.: &call->tx_lock FD: 1 BD: 3 +...: &local->lock FD: 1 BD: 1 +.+.: &peer->lock FD: 1 BD: 1 ....: (&conn->timer) FD: 1 BD: 1 +.+.: (work_completion)(&conn->processor) FD: 1 BD: 1 ....: &list->lock#43 FD: 31 BD: 1 ..-.: &(&net->ipv6.addr_chk_work)->timer FD: 58 BD: 3 +.+.: &ep->mtx/1 ->&f->f_lock ->&ep->lock ->&u->lock ->&ws->lock ->&rq->__lock FD: 1 BD: 78 +...: &vlan_netdev_xmit_lock_key FD: 1 BD: 8 +.+.: unix_gc_lock FD: 21 BD: 1 +.+.: (wq_completion)phy17 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 
1 +.+.: (wq_completion)phy18 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy16 ->(work_completion)(&local->reconfig_filter) FD: 153 BD: 3 +.+.: nlk_cb_mutex-SOCK_DIAG ->fs_reclaim ->&c->lock ->pool_lock#2 ->&____s->seqcount#2 ->&____s->seqcount ->inet_diag_table_mutex ->&obj_hash[i].lock ->&rq->__lock ->rlock-AF_NETLINK ->&net->unx.table.locks[i] ->&n->list_lock ->&net->packet.sklist_lock FD: 21 BD: 1 +.+.: (wq_completion)phy23 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy19 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy20 ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy21 ->(work_completion)(&local->reconfig_filter) FD: 31 BD: 1 +.+.: (wq_completion)phy22 ->&rq->__lock ->(work_completion)(&local->reconfig_filter) FD: 21 BD: 1 +.+.: (wq_completion)phy24 ->(work_completion)(&local->reconfig_filter) FD: 2 BD: 1 +.+.: sk_lock-AF_KEY ->slock-AF_KEY FD: 1 BD: 2 +...: slock-AF_KEY FD: 49 BD: 76 +.-.: &bond->mode_lock ->(console_sem).lock ->&c->lock ->pool_lock#2 ->&n->list_lock FD: 62 BD: 2 +.+.: &pfk->dump_lock ->&net->xfrm.xfrm_policy_lock ->&net->xfrm.xfrm_state_lock FD: 29 BD: 73 +.+.: (work_completion)(&(&bond->mii_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 54 BD: 72 +.+.: (work_completion)(&(&bond->ad_work)->work) ->&bond->mode_lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 78 +...: &qdisc_xmit_lock_key#3 FD: 1 BD: 72 +.+.: (work_completion)(&(&bond->arp_work)->work) FD: 53 BD: 73 +.+.: (work_completion)(&(&bond->alb_work)->work) ->&obj_hash[i].lock ->&base->lock ->rcu_node_0 ->&rq->__lock FD: 218 BD: 3688 +.+.: (work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->pool_lock#2 ->rtnl_mutex.wait_lock ->&p->pi_lock ->rcu_node_0 ->&cfs_rq->removed.lock ->&rcu_state.expedited_wq ->stock_lock ->per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) FD: 28 BD: 72 +.+.: (work_completion)(&(&bond->slave_arr_work)->work) ->&obj_hash[i].lock ->&base->lock ->&rq->__lock FD: 1 BD: 78 +...: &batadv_netdev_xmit_lock_key FD: 1 BD: 4 +...: slock-AF_CAIF FD: 21 BD: 1 +.+.: (wq_completion)phy25 ->(work_completion)(&local->reconfig_filter) FD: 33 BD: 1 ..-.: &(&bond->slave_arr_work)->timer FD: 33 BD: 1 ..-.: &(&bond->mii_work)->timer FD: 33 BD: 1 ..-.: &(&bond->ad_work)->timer FD: 1 BD: 75 +.+.: (work_completion)(&(&priv->gc_work)->work) FD: 55 BD: 3 +.+.: sk_lock-AF_CAIF ->slock-AF_CAIF ->&obj_hash[i].lock ->elock-AF_CAIF ->&this->info_list_lock ->(console_sem).lock ->&rq->__lock ->&ei->socket.wq.wait ->clock-AF_CAIF ->console_owner_lock ->console_owner ->rcu_state.exp_mutex.wait_lock ->&p->pi_lock FD: 28 BD: 2 +.+.: &sdata->lock ->&rq->__lock FD: 1 BD: 4 ....: elock-AF_CAIF FD: 1 BD: 3 +...: rlock-AF_CAIF FD: 1 BD: 4 +...: clock-AF_CAIF FD: 102 BD: 73 +.-.: (&peer->timer_new_handshake) ->&peer->endpoint_lock FD: 152 BD: 1 +.+.: (wq_completion)bond7#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond4 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 72 +...: &bond->ipsec_lock FD: 152 BD: 1 +.+.: (wq_completion)bond5#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond6#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 1 BD: 1 ....: _rs.lock#13 FD: 32 BD: 2 +.+.: (work_completion)(&(&hinfo->gc_work)->work) ->&hinfo->lock ->&rq->__lock ->&obj_hash[i].lock ->&base->lock 
->&cfs_rq->removed.lock ->pool_lock#2 ->rcu_node_0 ->&rcu_state.expedited_wq FD: 1 BD: 3 +...: &hinfo->lock FD: 1 BD: 104 +.+.: dev_pm_qos_sysfs_mtx.wait_lock FD: 1 BD: 23 +.+.: (work_completion)(&(&conn->id_addr_timer)->work) FD: 1 BD: 22 +.+.: (work_completion)(&(&conn->auto_accept_work)->work) FD: 1 BD: 22 +.+.: (work_completion)(&(&conn->idle_work)->work) FD: 133 BD: 3 +.+.: crypto_default_null_skcipher_lock ->&rq->__lock ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 ....: _rs.lock#14 FD: 33 BD: 1 +.-.: (&pool->idle_timer) ->&pool->lock/1 ->&pool->lock FD: 1 BD: 13 ....: rdma_nets_rwsem.wait_lock FD: 13 BD: 1 +.-.: (t) ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 27 ....: namespace_sem.wait_lock FD: 5 BD: 2 +.+.: (ima_keys_delayed_work).work ->ima_keys_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 78 +...: &qdisc_xmit_lock_key#4 FD: 31 BD: 1 ..-.: security/integrity/ima/ima_queue_keys.c:35 FD: 22 BD: 1 +.-.: (&q->adapt_timer)#2 FD: 1 BD: 7 +...: &list->lock#44 FD: 17 BD: 3785 -.-.: &q->current_entry_lock ->hrtimer_bases.lock FD: 1 BD: 7 +...: &vvs->rx_lock FD: 230 BD: 1 +.+.: (wq_completion)vsock-loopback ->(work_completion)(&vsock->pkt_work) FD: 229 BD: 2 +.+.: (work_completion)(&vsock->pkt_work) ->&list->lock#44 ->vsock_table_lock ->sk_lock-AF_VSOCK ->slock-AF_VSOCK ->&obj_hash[i].lock ->pool_lock#2 FD: 137 BD: 6 +.+.: sk_lock-AF_VSOCK/1 ->slock-AF_VSOCK ->fs_reclaim ->pool_lock#2 ->&vvs->tx_lock ->vsock_table_lock ->&vvs->rx_lock ->&list->lock#44 ->&rq->__lock ->&obj_hash[i].lock ->&base->lock ->clock-AF_VSOCK ->rlock-AF_VSOCK FD: 1 BD: 7 +...: &vvs->tx_lock FD: 2 BD: 4 +.+.: &po->pg_vec_lock ->rlock-AF_PACKET FD: 128 BD: 72 +.+.: &tn->idrinfo->lock#3 ->fs_reclaim ->pool_lock#2 ->&obj_hash[i].lock FD: 227 BD: 1 +.+.: (wq_completion)bond1 ->(work_completion)(&(&bond->alb_work)->work) ->(work_completion)(&(&bond->mii_work)->work) ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 33 BD: 1 ..-.: &(&bond->alb_work)->timer FD: 33 BD: 1 ..-.: &(&bond->mcast_work)->timer FD: 1 BD: 1 ....: _rs.lock#15 FD: 1 BD: 78 +...: _xmit_NONE#2 FD: 1 BD: 1 ....: _rs.lock#16 FD: 138 BD: 4 +.+.: sk_lock-AF_TIPC/1 ->slock-AF_TIPC ->&obj_hash[i].lock ->&base->lock ->fs_reclaim ->pool_lock#2 ->&list->lock#23 ->&rq->__lock FD: 128 BD: 72 +.+.: &tn->idrinfo->lock#4 ->fs_reclaim ->pool_lock#2 ->&c->lock FD: 1 BD: 17 +.+.: &pool->flush_lock FD: 904 BD: 2 +.+.: nlk_cb_mutex-RDMA ->fs_reclaim ->pool_lock#2 ->&c->lock ->&n->list_lock ->devices_rwsem ->rlock-AF_NETLINK FD: 1 BD: 1 .+.+: hidp_session_sem FD: 41 BD: 1 +.-.: (&sk->sk_timer)#2 ->slock-AF_NETROM ->nr_list_lock ->&obj_hash[i].lock ->wlock-AF_NETROM ->&list->lock#20 ->rlock-AF_NETROM FD: 1 BD: 72 +.+.: &tn->idrinfo->lock#5 FD: 72 BD: 2 +.+.: (work_completion)(&work->work)#3 ->pool_lock#2 ->&dir->lock#2 ->&ul->lock#2 ->&obj_hash[i].lock ->&c->lock ->&____s->seqcount ->&n->list_lock ->&____s->seqcount#2 ->&rq->__lock FD: 131 BD: 1 .+.+: kn->active#62 ->&rq->__lock ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 133 BD: 75 +.+.: tcp_md5sig_mutex ->&rq->__lock ->crypto_alg_sem ->fs_reclaim ->pool_lock#2 FD: 31 BD: 1 ..-.: net/ipv4/tcp_ipv4.c:1064 FD: 284 BD: 2 +.+.: ((tcp_md5_needed).work).work ->cpu_hotplug_lock FD: 2 BD: 9 +.+.: &id_priv->handler_mutex ->&id_priv->lock FD: 1 BD: 9 ....: &x->wait#28 FD: 2 BD: 7 ....: rds_conn_lock ->rds_cong_lock FD: 56 BD: 7 +.+.: &tc->t_conn_path_lock ->clock-AF_INET6 ->&cp->cp_lock 
->pool_lock#2 ->&rq->__lock FD: 1 BD: 133 +...: rds_tcp_tc_list_lock FD: 1 BD: 106 ..-.: &cp->cp_lock FD: 405 BD: 6 +.+.: (work_completion)(&(&cp->cp_send_w)->work) ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->pool_lock#2 ->&obj_hash[i].lock ->&cp->cp_lock ->&c->lock FD: 1 BD: 106 ....: &rm->m_rs_lock FD: 405 BD: 6 +.+.: (work_completion)(&(&cp->cp_recv_w)->work) ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 FD: 1 BD: 106 ..-.: &list->lock#45 FD: 401 BD: 6 +.+.: (work_completion)(&cp->cp_down_w) ->&cp->cp_cm_lock ->k-sk_lock-AF_INET6 ->k-slock-AF_INET6 ->pool_lock#2 ->&dir->lock ->&obj_hash[i].lock ->&sb->s_type->i_lock_key#8 ->&xa->xa_lock#8 ->&fsnotify_mark_srcu ->&cp->cp_lock ->(work_completion)(&(&cp->cp_conn_w)->work) ->&list->lock#45 FD: 1 BD: 7 +.+.: &cp->cp_cm_lock FD: 1 BD: 7 +.+.: (work_completion)(&(&cp->cp_conn_w)->work) FD: 1 BD: 4 +.+.: llcp_devices_lock FD: 2 BD: 4 +.+.: &local->sdp_lock ->&local->sockets.lock FD: 1 BD: 5 ++++: &local->sockets.lock FD: 1 BD: 75 +.+.: &ping_table.lock FD: 40 BD: 74 +.-.: (&p->timer) ->&br->multicast_lock FD: 1 BD: 1 ....: &wq#4 FD: 1 BD: 1 +.+.: &s->lock FD: 40 BD: 74 +.-.: (&mp->timer) ->&br->multicast_lock FD: 1 BD: 74 ....: (&p->rexmit_timer) FD: 448 BD: 1 +.+.: (wq_completion)kstrp ->(work_completion)(&strp->work)#2 FD: 33 BD: 73 +.-.: (&peer->timer_zero_key_material) FD: 133 BD: 83 +.+.: (work_completion)(&peer->clear_peer_work) ->&handshake->lock ->&peer->keypairs.keypair_update_lock ->&rq->__lock FD: 447 BD: 4 +.+.: (work_completion)(&strp->work)#2 ->sk_lock-AF_INET ->slock-AF_INET FD: 1 BD: 3 +.+.: (work_completion)(&(&strp->msg_timer_work)->work) FD: 1 BD: 1 +.+.: &r->consumer_lock#5 FD: 29 BD: 1 ....: &x->wait#29 ->&p->pi_lock FD: 45 BD: 2 +.+.: (work_completion)(&old_rcpu->kthread_stop_wq) ->rcu_state.barrier_mutex ->rcu_state.barrier_mutex.wait_lock ->&p->pi_lock ->&x->wait ->&pool->lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 3 +...: data_sockets.lock FD: 4 BD: 3 +.+.: sk_lock-AF_ISDN ->slock-AF_ISDN ->clock-AF_ISDN ->rlock-AF_ISDN FD: 1 BD: 4 +...: slock-AF_ISDN FD: 1 BD: 4 ....: rlock-AF_ISDN FD: 286 BD: 1 .+.+: kn->active#63 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock ->&c->lock FD: 28 BD: 1 +.+.: (work_completion)(&(&lb_priv->ex->stats.refresh_dw)->work) ->&rq->__lock FD: 1 BD: 1 +.+.: (work_completion)(&(&team->mcast_rejoin.dw)->work) FD: 1 BD: 1 +.+.: (work_completion)(&(&team->notify_peers.dw)->work) FD: 136 BD: 1 ++++: kn->active#64 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock ->&cgrp->pidlist_mutex FD: 1 BD: 4 +.+.: raw_sk_list.lock FD: 1 BD: 72 ....: (&pmctx->ip6_mc_router_timer) FD: 31 BD: 1 ..-.: &(&l->destroy_dwork)->timer FD: 133 BD: 4 +.+.: (work_completion)(&(&l->destroy_dwork)->work) ->&cgrp->pidlist_mutex ->&obj_hash[i].lock FD: 1 BD: 3 +.+.: nfnl_grp_active_lock FD: 1 BD: 1 ....: _rs.lock#17 FD: 1 BD: 72 ....: (&pmctx->ip4_mc_router_timer) FD: 128 BD: 72 +.+.: &tn->idrinfo->lock#6 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&____s->seqcount#2 ->&n->list_lock FD: 46 BD: 72 +...: &dev_addr_list_lock_key/2 ->&bridge_netdev_addr_lock_key/1 FD: 1 BD: 72 +.+.: &bond->stats_lock/2 FD: 58 BD: 72 +.-.: (&br->hello_timer) ->&br->lock FD: 1 BD: 72 ....: (&br->topology_change_timer) FD: 1 BD: 72 ....: (&br->tcn_timer) FD: 28 BD: 73 +.+.: &nmap->mutex ->&rq->__lock FD: 1 BD: 1 +...: &tp_vars->unacked_lock FD: 9 BD: 6 +...: &bat_priv->tp_list_lock ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 1 BD: 1 ....: 
(&tp_vars->timer) FD: 40 BD: 1 +.+.: (wq_completion)tipc_send#3 ->(work_completion)(&con->swork) FD: 1 BD: 1 ....: _rs.lock#18 FD: 1 BD: 78 +...: _xmit_TUNNEL#2 FD: 1 BD: 78 +...: _xmit_IPGRE#2 FD: 2 BD: 72 ....: &r->consumer_lock#6 ->&r->producer_lock#3 FD: 1 BD: 82 +.-.: &r->producer_lock#3 FD: 1 BD: 78 +...: _xmit_SIT#2 FD: 1 BD: 78 +...: &qdisc_xmit_lock_key#5 FD: 2 BD: 80 +...: &____s->seqcount#16 ->pool_lock#2 FD: 213 BD: 4 +.+.: rfcomm_ioctl_mutex ->&mm->mmap_lock FD: 1 BD: 6 +.+.: ebt_mutex.wait_lock FD: 1 BD: 75 +.+.: acaddr_hash_lock FD: 1 BD: 74 ....: &pool->xsk_tx_list_lock FD: 1 BD: 13 +.+.: (work_completion)(&data->fib_flush_work) FD: 871 BD: 2 +.+.: (work_completion)(&pool->work) ->&rq->__lock ->rtnl_mutex ->vmap_area_lock ->&obj_hash[i].lock ->purge_vmap_area_lock ->pool_lock#2 ->umem_ida.xa_lock ->&zone->lock FD: 1 BD: 13 +.+.: &region->snapshot_lock FD: 36 BD: 2 +.+.: (work_completion)(&pool->idle_cull_work) ->wq_pool_attach_mutex ->wq_pool_attach_mutex.wait_lock ->&p->pi_lock ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->pool_lock#2 FD: 1 BD: 151 +.+.: wq_pool_attach_mutex.wait_lock FD: 152 BD: 1 +.+.: (wq_completion)tipc_rcv#2 ->(work_completion)(&srv->awork) ->(work_completion)(&con->rwork) FD: 1 BD: 82 ....: key#26 FD: 1 BD: 74 +.-.: &est->seq FD: 1 BD: 73 +.+.: &tp->lock FD: 1 BD: 72 +...: &pmc->lock FD: 508 BD: 1 +.+.: &ctx->tx_lock ->sk_lock-AF_INET6 ->slock-AF_INET6 ->&rq->__lock ->rcu_node_0 FD: 28 BD: 10 +.+.: (work_completion)(&(&hdev->interleave_scan)->work) ->&rq->__lock FD: 1 BD: 10 +.+.: (work_completion)(&(&hdev->rpa_expired)->work) FD: 1 BD: 1 ....: _rs.lock#19 FD: 132 BD: 1 .+.+: kn->active#65 ->fs_reclaim ->&c->lock ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->devcgroup_mutex FD: 1 BD: 4 +...: smc_v6_hashinfo.lock FD: 46 BD: 3716 +...: &macvlan_netdev_addr_lock_key/2 ->&vlan_netdev_addr_lock_key/1 ->pool_lock#2 ->&obj_hash[i].lock ->krc.lock FD: 94 BD: 1 +.-.: (&msk->sk.icsk_retransmit_timer) ->slock-AF_INET FD: 1 BD: 1 ....: _rs.lock#20 FD: 286 BD: 1 .+.+: kn->active#66 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->cpu_hotplug_lock FD: 1 BD: 4 +...: ax25_dev_lock FD: 1 BD: 1 ....: _rs.lock#21 FD: 1 BD: 6 +.+.: calipso_doi_list_lock FD: 129 BD: 5 +.+.: reg_lock ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 2 BD: 149 +.+.: lock#10 FD: 2 BD: 3788 +...: &(&n->hh.hh_lock)->lock ->&____s->seqcount#10 FD: 1 BD: 1 ....: _rs.lock#22 FD: 1 BD: 1 ....: &head->lock FD: 133 BD: 1 +.+.: mem_id_lock ->&rq->__lock ->fs_reclaim ->pool_lock#2 ->batched_entropy_u32.lock ->&obj_hash[i].lock ->mem_id_pool.xa_lock ->&ht->lock FD: 1 BD: 1 ....: _rs.lock#23 FD: 1 BD: 6 +...: &inst->lock FD: 1 BD: 2 ..-.: mem_id_pool.xa_lock FD: 1 BD: 1 +...: &r->producer_lock#4 FD: 31 BD: 1 ..-.: &(&hinfo->gc_work)->timer FD: 131 BD: 1 .+.+: kn->active#67 ->fs_reclaim ->stock_lock ->&rq->__lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 1 BD: 1 +.+.: &type->s_umount_key#48 FD: 1 BD: 74 +...: &priv->lock FD: 1 BD: 73 +...: &priv->j1939_socks_lock FD: 1 BD: 73 +...: &jsk->sk_session_queue_lock FD: 2 BD: 1 +.-.: (&policy->timer) ->&policy->lock FD: 131 BD: 1 .+.+: kn->active#68 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] FD: 128 BD: 72 +.+.: &tn->idrinfo->lock#7 ->fs_reclaim ->&c->lock ->pool_lock#2 ->&obj_hash[i].lock FD: 131 BD: 1 .+.+: kn->active#69 ->fs_reclaim ->&kernfs_locks->open_file_mutex[count] FD: 35 BD: 1 +.+.: sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 ->slock-AF_BLUETOOTH-BTPROTO_L2CAP
->&ei->socket.wq.wait FD: 1 BD: 1 ....: _rs.lock#24 FD: 130 BD: 75 +.+.: &sb->s_type->i_mutex_key#3/1 ->rename_lock.seqcount ->fs_reclaim ->stock_lock ->&c->lock ->pool_lock#2 ->&dentry->d_lock ->tk_core.seq.seqcount ->rename_lock ->&rq->__lock FD: 1 BD: 10 +.+.: ovs_mutex.wait_lock FD: 13 BD: 1 +.-.: (&tsc_sync_check_timer) ->&obj_hash[i].lock ->&base->lock FD: 1 BD: 85 +.+.: pack_mutex.wait_lock FD: 31 BD: 1 ..-.: net/ipv4/devinet.c:474 FD: 1 BD: 190 +.+.: gdp_mutex.wait_lock FD: 1 BD: 1 ....: _rs.lock#25 FD: 1 BD: 1 +.+.: bpf_module_mutex FD: 220 BD: 1 +.+.: (wq_completion)bond1#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond4#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond5#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond6#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond7#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 29 BD: 5 ....: &sk->sk_lock.wq#2 ->&p->pi_lock FD: 220 BD: 1 +.+.: (wq_completion)bond8#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond9 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond10 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond17 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond18 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond8#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond19 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond9#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond20 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond10#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond21 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond22 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond23 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond24 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 
BD: 1 +.+.: (wq_completion)bond25 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 79 ....: key#27 FD: 220 BD: 1 +.+.: (wq_completion)bond26 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond27 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond28 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond29 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond18#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond17#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond2#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond39 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond40 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond15 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond41 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond16 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond42 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond19#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond3#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond43 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond44 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond45 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond20#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond4#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond46 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond17#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond47 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond48 ->(work_completion)(&(&slave->notify_work)->work) FD: 131 BD: 1 .+.+: kn->active#70 ->fs_reclaim ->stock_lock ->&kernfs_locks->open_file_mutex[count] ->&c->lock FD: 220 BD: 1 +.+.: (wq_completion)bond21#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond49 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond18#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond50 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond22#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond19#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond23#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond24#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond25#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond26#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond27#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond28#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond29#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond75 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond30 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond76 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond77 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond78 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond79 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond80 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond81 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond82 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond11 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond12 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond84 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond26#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond13 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond85 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond27#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond40#2 
->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond41#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond14 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond42#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond86 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond43#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond28#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond44#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond45#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond46#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond15#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond87 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond29#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond47#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond48#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond16#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond49#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond88 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond30#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond50#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond89 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond17#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond51 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond31 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond90 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond18#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond19#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond91 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond33 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond20#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond92 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond34 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond21#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond35 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond36 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond37 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond39#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond38 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond93 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond40#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond6#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond41#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond94 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond42#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond95 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond43#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond44#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond96 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond97 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond45#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond9#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond98 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond46#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond10#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond99 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond47#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond48#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond100 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond49#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond11#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond50#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond12#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond101 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond51#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond102 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond52 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond103 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond14#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond53 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond54 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond55 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond105 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond56 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond76#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond106 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond57 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond15#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond77#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond58 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond78#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond16#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond108 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond79#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond17#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond109 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond31#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond80#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond32 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond81#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond33#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond110 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond34#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond18#5 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond82#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond35#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond36#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond83 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond19#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond37#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond84#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond38#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond40#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond39#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond41#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond42#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond112 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond87#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond116 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond117 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond25#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond27#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond88#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond89#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond28#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond29#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond120 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond30#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond91#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond67 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond69 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond68 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond70 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond71 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond72 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond73 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond36#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond78#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond125 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond126 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond38#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond80#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond127 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond39#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond128 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond40#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond129 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond41#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond130 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond42#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond131 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond43#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond132 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond44#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond133 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond45#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond87#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond134 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond46#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond88#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond135 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond47#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond92#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond89#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond136 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond48#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond93#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond90#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond137 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond49#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond94#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond91#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond138 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 
+.+.: (wq_completion)bond50#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 152 BD: 1 +.+.: (wq_completion)bond68#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond92#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond139 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond93#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond140 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond52#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond95#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 152 BD: 1 +.+.: (wq_completion)bond69#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond94#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond141 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond53#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond96#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond95#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond142 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond70#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond54#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond97#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond96#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond143 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond55#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond71#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond98#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond97#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond144 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond56#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond98#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond72#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond145 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond99#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond57#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond146 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond73#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 152 BD: 1 +.+.: (wq_completion)bond100#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond58#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond100#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond147 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond59 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond9#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond101#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond74 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond148 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond101#3 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond60 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond10#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond102#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond149 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond61 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond75#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond102#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond11#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond103#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond150 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond62 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond12#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond103#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond104 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond151 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond63 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond13#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond104#2 ->(work_completion)(&(&slave->notify_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond77#3 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond105#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond152 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond64 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond14#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond106#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond153 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond105#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond78#4 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond65 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond15#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond107 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond154 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 152 BD: 1 +.+.: (wq_completion)bond106#3 ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond66 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond16#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond79#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond108#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond155 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond107#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond67#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond17#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond109#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond156 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond108#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond68#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond18#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond110#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond157 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond109#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond69#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond19#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond158 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond70#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond112#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond20#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond159 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond71#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond110#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond113 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond21#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond160 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond72#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond111 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond114 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond22#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond161 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond73#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond112#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond115 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond23#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond162 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond74#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond116#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond113#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond24#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond163 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond75#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond117#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond114#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond25#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond164 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond76#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond118 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond115#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond26#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond165 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond77#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond119 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond116#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond27#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond166 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond78#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond120#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond117#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond28#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond167 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond79#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond121 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond118#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond29#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond168 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond80#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond122 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond119#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond169 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond81#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond123 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond31#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond170 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond82#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond124 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond32#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond171 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond83#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond125#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond33#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond172 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond84#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond126#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond120#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond173 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond34#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond85#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond127#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond121#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond174 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond35#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond86#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond128#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond122#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond175 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond36#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond87#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond129#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond123#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond176 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond88#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond130#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond124#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond177 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond89#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond131#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond125#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond178 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond90#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond132#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond126#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond179 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond91#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond133#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond127#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond180 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond92#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond134#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond181 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond93#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond135#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond37#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond182 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond128#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond94#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond136#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond183 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond129#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond95#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond137#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond184 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond38#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond96#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond138#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond185 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond39#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond97#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond139#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond186 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond98#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond140#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond187 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond99#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond40#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond141#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond130#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond188 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond41#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond100#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond142#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond131#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond189 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond101#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond143#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond132#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond190 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond102#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond144#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond42#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond191 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond103#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond145#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond43#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond192 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond104#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond146#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond44#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond193 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond194 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond105#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond106#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond147#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond195 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond107#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond148#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond196 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond108#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond197 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond149#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond109#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond198 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond150#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond110#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond199 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond151#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond111#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond152#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond200 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond112#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond153#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond201 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond154#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond113#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond202 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond155#2 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond114#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond203 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond156#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond115#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond204 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond157#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond116#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond158#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond205 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond117#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond159#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond206 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond118#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond160#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond207 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond119#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond161#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond208 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond120#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond162#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond209 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond121#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond163#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond122#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond164#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond211 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond123#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond165#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond212 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond124#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond166#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond213 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond125#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond167#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond214 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond126#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond168#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond215 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond127#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond169#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond216 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond128#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond170#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond217 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond129#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond171#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond218 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond130#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond172#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond131#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond219 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond173#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond104#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond220 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond133#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond105#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond221 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond134#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond175#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond222 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond106#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond135#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond176#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond223 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 
BD: 1 +.+.: (wq_completion)bond136#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond107#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond177#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond224 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond137#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond108#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond178#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond225 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond138#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond179#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond109#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond226 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond139#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond180#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond227 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond140#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond110#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond181#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond228 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond141#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond111#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond182#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond229 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond142#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond183#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond112#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond230 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond143#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond184#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond113#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond231 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond144#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond185#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond114#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond232 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond145#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond186#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond115#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond233 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond146#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond187#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond116#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond234 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond147#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond188#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond235 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond148#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond189#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond236 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond149#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond117#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond190#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond237 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond150#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond118#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond191#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond238 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond151#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond192#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond239 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond152#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond193#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond240 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond119#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond153#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond194#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond241 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond154#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond195#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond242 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond155#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond196#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond243 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond120#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond156#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond197#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond244 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond121#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond157#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond245 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond198#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond122#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond158#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond246 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond123#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond200#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond247 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond159#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond124#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond201#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond125#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond160#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond248 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond202#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond161#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond249 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond203#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond162#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond250 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond204#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond163#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond126#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond251 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond205#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond164#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond127#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond252 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond206#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond165#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond128#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond253 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond207#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond129#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond254 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond208#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond167#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond130#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond255 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond209#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond168#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond131#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond256 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond169#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond210 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond257 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond170#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond211#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond258 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond171#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond132#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond212#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond259 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond172#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond133#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond213#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond260 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond173#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond134#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond214#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond261 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond174#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 
BD: 1 +.+.: (wq_completion)bond135#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond215#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond262 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond175#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond136#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond216#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond263 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond176#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond137#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond217#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond264 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond177#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond138#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond218#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond265 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond178#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond139#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond219#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond266 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond179#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond140#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond220#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond267 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond180#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond141#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond221#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond268 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond181#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond222#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond143#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond223#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond270 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond183#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond271 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond224#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond184#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond272 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond144#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond225#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond185#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond273 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond226#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond186#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond274 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond227#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond187#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond145#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond275 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond228#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond188#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond229#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond146#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond276 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond189#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond230#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond147#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond277 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond190#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond231#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond148#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond278 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond191#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond232#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond149#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond279 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond192#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond233#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond280 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond193#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond234#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond281 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond194#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond235#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond150#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond282 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond195#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond236#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond151#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond283 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond196#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond237#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond152#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond284 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond197#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond238#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond153#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond285 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond198#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond239#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond286 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond199#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond240#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond154#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 
1 +.+.: (wq_completion)bond287 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond200#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond241#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond155#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond288 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond201#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond242#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond156#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond289 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond202#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond243#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond157#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond290 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond203#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond244#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond158#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond291 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond204#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond245#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond292 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond205#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond246#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond293 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond206#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond247#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond294 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond207#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond248#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond295 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond159#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond208#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond249#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond296 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond160#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond209#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond250#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond297 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond161#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond210#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond251#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond211#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond252#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond299 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond162#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond212#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond253#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond300 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond163#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond213#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond254#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond301 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond164#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond214#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond255#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond302 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond165#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond215#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond256#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond303 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond216#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond257#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond304 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond217#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond258#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond305 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond218#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond259#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond306 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond167#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond260#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
FD: 220 BD: 1 +.+.: (wq_completion)bond219#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond307 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond261#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond220#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond308 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond221#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond262#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond168#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond309 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond222#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond263#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond169#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond310 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond223#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond264#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond170#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond311 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond224#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond265#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond171#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond225#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond266#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond172#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond312 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond226#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond267#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond173#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond313 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond227#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond268#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond228#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond314 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond269 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond229#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond315 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond175#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond270#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond230#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond316 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond176#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond271#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond231#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond317 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond177#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond272#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond232#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond318 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond178#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond273#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond233#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond319 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond179#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond274#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond234#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond320 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond180#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond275#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond235#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond321 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond181#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond276#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond236#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond322 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond182#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond277#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond237#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond323 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond183#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond278#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond238#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond324 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond184#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond279#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond239#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond325 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond185#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond240#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond280#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond326 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond241#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond281#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond327 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond282#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond242#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond328 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond186#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond283#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond243#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond329 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond187#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond284#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond244#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond330 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond285#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond245#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond331 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond286#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond246#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond188#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond332 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond287#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond247#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond333 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond288#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond248#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond334 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond289#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond189#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond335 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond249#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond290#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond336 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond250#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond291#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond337 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond251#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond190#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond292#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond338 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond252#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond293#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond191#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond339 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond253#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond294#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond192#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond340 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond254#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond295#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond193#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond341 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond255#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond296#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond342 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond256#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond297#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond343 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond257#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond298 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond344 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond258#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond299#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond194#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond345 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond259#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond300#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond195#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond346 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond260#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond301#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond196#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond347 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond261#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond302#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond197#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond348 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond262#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond303#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond198#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond349 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond263#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond304#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond199#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond264#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond305#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond200#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond351 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond265#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond306#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond201#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond352 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond266#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond307#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond45#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond202#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond353 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond267#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond308#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond46#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond203#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond354 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond268#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond309#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond47#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond204#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond355 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond269#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond310#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond205#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond356 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond270#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond311#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond206#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond357 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond271#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond312#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond207#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond358 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond272#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond48#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond313#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond208#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond359 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond273#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond49#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond314#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond209#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond360 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond274#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond50#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond315#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond210#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond361 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond275#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond51#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond316#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond211#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond362 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond276#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond52#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond317#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond212#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond363 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond277#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond53#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond318#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond278#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond54#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond364 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond319#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond365 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond279#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond55#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond320#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond366 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond213#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond280#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond56#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond321#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond367 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond281#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond322#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond368 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond282#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond214#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond323#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond57#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond369 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond283#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond324#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond58#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond370 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond284#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond215#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond59#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond285#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond216#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond325#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond60#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond371 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond286#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond217#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond326#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond372 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond61#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond287#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond218#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond327#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond373 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond288#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond328#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond374 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond62#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond219#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond329#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond375 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond63#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond289#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond220#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond330#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond376 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond64#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond290#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond221#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond331#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond377 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond65#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond291#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond222#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond332#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond378 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond292#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond66#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond223#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond333#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond379 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond293#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond67#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond224#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond334#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond380 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond294#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond68#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond225#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond335#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond381 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond295#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond69#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond226#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond296#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond70#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond227#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond336#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond382 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond297#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond337#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond228#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond383 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond298#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond71#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond338#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond229#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond384 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond299#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond72#4 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond339#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond230#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond385 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond300#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond73#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond340#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond231#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond386 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond301#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond341#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond74#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond232#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond387 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond302#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond342#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond75#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond388 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond233#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond303#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond343#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond76#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond389 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond234#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond304#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond344#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond77#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond390 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond235#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond305#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond345#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond78#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond391 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond236#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond306#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond346#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond79#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond392 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond237#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond347#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond238#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 179 +.+.: rcu_state.exp_wake_mutex.wait_lock FD: 220 BD: 1 +.+.: (wq_completion)bond307#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond239#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond308#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond348#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond80#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond394 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond309#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond349#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond81#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond395 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond310#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond241#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond350 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond82#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond396 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond311#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond242#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond351#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond83#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond397 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond312#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond243#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond352#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond84#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond313#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond244#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond353#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond314#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond398 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond245#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond354#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond86#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond399 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond246#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond355#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond87#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond316#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond400 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond356#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond88#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond317#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond401 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond357#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond247#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond89#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond318#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond402 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond358#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond248#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond90#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond319#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond359#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond249#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond91#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond403 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond320#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond360#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond250#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond92#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond404 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond321#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond361#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond251#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond405 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond322#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond362#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond252#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond93#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond406 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond323#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond363#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond253#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond407 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond324#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond364#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond254#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond94#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond408 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond325#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond365#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond255#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond409 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond326#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond366#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond256#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond95#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond367#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond257#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond411 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond96#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond327#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond368#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond258#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond412 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond97#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond328#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond369#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond413 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond259#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond98#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond329#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond370#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond260#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond414 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond99#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond330#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond371#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond415 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond100#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond331#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond372#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond416 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond262#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond101#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond332#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond373#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond417 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond263#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond102#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond333#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond374#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond418 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond264#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond103#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond334#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond375#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond419 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond335#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond376#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond420 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond104#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond265#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond336#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond377#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond421 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond105#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond266#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond378#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond422 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond106#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond267#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond379#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond107#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 
+.+.: (wq_completion)bond268#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond380#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond337#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond423 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond108#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond269#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond381#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond338#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond424 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond109#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond270#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond382#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond339#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond425 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond383#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond340#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond426 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond384#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond111#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond341#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond271#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond112#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond342#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond272#4 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond385#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond113#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond343#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond273#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond428 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond386#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond344#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond274#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond387#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond114#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond345#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond275#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond430 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond388#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond115#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond346#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond276#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond431 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond389#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond116#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond347#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond432 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond390#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond117#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond348#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond277#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond433 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond391#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond349#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond278#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 8 +.+.: nf_conntrack_mutex.wait_lock FD: 220 BD: 1 +.+.: (wq_completion)bond434 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond392#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond119#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond350#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond393 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond280#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond351#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond435 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond394#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond281#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond352#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond436 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond120#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond395#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond282#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 
BD: 1 +.+.: (wq_completion)bond353#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond437 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond121#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond396#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond438 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond122#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond397#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond283#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond354#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond439 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond284#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond355#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond440 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond123#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond399#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond400#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond441 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond285#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond442 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond286#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond356#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond443 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond401#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond357#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond444 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond402#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond287#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond403#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond288#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond359#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond445 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond125#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond404#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond360#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond446 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond126#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond405#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond447 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond127#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond406#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond448 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond361#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond128#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond407#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond449 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond362#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond129#6 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond408#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond450 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond363#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond130#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond409#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond451 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond364#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond410 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond131#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond452 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond365#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond411#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond132#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond453 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond366#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond412#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond133#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond454 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond367#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond413#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond455 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond368#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond414#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond456 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond369#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond135#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond415#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond457 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond370#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond136#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond416#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond458 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond371#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond137#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond417#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond459 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond372#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond418#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond138#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond460 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond373#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond419#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 FD: 220 BD: 1 +.+.: (wq_completion)bond139#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond461 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond374#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond420#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond140#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond462 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond375#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond421#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond463 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond376#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond422#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond142#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond464 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond377#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond423#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond465 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond378#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond143#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond424#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond466 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond379#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond144#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond425#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond467 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond380#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond426#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond145#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond468 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond381#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond427 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond146#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond469 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond382#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond428#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond147#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond470 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond383#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond429 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond471 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond148#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond384#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond430#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond472 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond149#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond385#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond431#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond473 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond150#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond386#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond432#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond474 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond151#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond387#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond433#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond475 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond388#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond152#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond434#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond476 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond389#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond153#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond435#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond477 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond390#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond154#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond436#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond478 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond391#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond155#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond437#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond392#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond156#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 
+.+.: (wq_completion)bond438#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond480 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond393#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond157#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond439#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond481 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond394#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond158#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond440#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond482 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond395#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond159#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond441#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond483 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond396#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond442#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond160#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond484 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond397#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond443#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond161#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond485 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond398#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond444#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond162#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond486 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond399#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond445#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond163#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond487 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond400#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond446#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond164#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond488 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond401#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond447#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond489 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond165#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond402#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond448#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond490 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond166#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond403#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond449#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 220 BD: 1 +.+.: (wq_completion)bond491 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond167#5 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond404#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond492 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond168#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond451#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond405#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond493 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond452#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond169#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond406#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond494 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond453#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond170#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond407#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond495 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond454#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond171#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond408#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond496 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond455#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond409#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond172#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond497 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond456#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond410#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond457#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond411#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond499 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond458#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond412#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond500 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond459#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond413#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond501 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond460#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond414#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond502 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond461#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond415#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond503 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond462#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond416#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond504 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond463#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond417#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond505 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond464#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond418#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond506 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond465#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond419#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond507 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond466#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond420#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond181#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond508 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond467#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond421#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond182#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock FD: 220 BD: 1 +.+.: (wq_completion)bond509 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond468#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond422#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond510 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond469#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond423#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond183#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond511 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond470#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond424#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond184#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond512 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond471#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond425#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond185#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond513 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond472#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond426#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond186#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond514 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond473#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond427#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond187#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond515 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond474#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond428#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond188#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond475#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond189#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond516 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond476#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond429#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond190#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 
+.+.: (wq_completion)bond517 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond477#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond430#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond518 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond191#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond478#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond431#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond519 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond192#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond479 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond432#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond520 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond193#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond480#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond433#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond521 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond194#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond481#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond434#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond522 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond195#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond482#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond435#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond523 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond196#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond483#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond436#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond524 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond197#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond484#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond437#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond525 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond198#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond485#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond438#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond526 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond199#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond486#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond439#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond527 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond200#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond487#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond528 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond201#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond441#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond529 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond202#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond489#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond442#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond530 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond203#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond490#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond443#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond531 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond204#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond491#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond444#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond532 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond492#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond205#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond445#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond533 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond493#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond206#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond446#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond494#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond207#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond447#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond535 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond495#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond208#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond536 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond496#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond209#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond449#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond537 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond497#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond450#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond210#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond538 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond451#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond498 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond539 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond211#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond452#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond499#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond540 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond212#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond453#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond500#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond541 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond213#5 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond454#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond501#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond542 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond214#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond455#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond502#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond543 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond215#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond456#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond503#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond544 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond216#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond457#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond504#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond545 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond217#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond458#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond505#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond546 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond218#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond459#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond506#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond547 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond507#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond548 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond219#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond460#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock FD: 220 BD: 1 +.+.: (wq_completion)bond508#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond549 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond220#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond461#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond509#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond550 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond221#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond462#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond510#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond551 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond463#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond222#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond511#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond552 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond464#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond223#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond512#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond553 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond465#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond224#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond513#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond554 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond466#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond225#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond514#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond555 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond467#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond226#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond515#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond556 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond468#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond227#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond557 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond469#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond228#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond517#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond558 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond470#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond229#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond518#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond559 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond471#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond230#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 220 BD: 1 +.+.: (wq_completion)bond519#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond560 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond472#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond231#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond520#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond561 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond473#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond232#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond521#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond562 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond474#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond233#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond522#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond563 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond475#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond234#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond523#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond564 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond476#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond235#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond524#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond565 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond477#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond236#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond525#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond566 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond478#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond237#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond526#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond567 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond479#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond238#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond527#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond568 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond480#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond240#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond528#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond569 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond481#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond241#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond529#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond570 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond482#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond242#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond530#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond571 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond483#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond243#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond572 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond531#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond484#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond573 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond244#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond532#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond485#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond574 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond245#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond533#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond486#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond575 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond246#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond534 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond487#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond576 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond247#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond535#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond488#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond536#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond489#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond577 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond248#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond537#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond490#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond578 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond249#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond538#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond491#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond579 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond250#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond539#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond492#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond580 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond251#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond540#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond493#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond581 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond541#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond252#5 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond542#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond494#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond253#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond543#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond495#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond583 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond254#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond544#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond496#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond584 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond255#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond545#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond497#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond585 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond256#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond257#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond546#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond498#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond586 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond258#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond547#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond499#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 
+.+.: (wq_completion)bond587 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond259#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond548#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond500#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond588 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond260#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond549#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond501#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond589 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond550#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond502#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond590 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond551#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond503#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond591 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond552#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock FD: 220 BD: 1 +.+.: (wq_completion)bond504#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond261#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond592 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond553#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond505#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond262#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond593 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond554#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond263#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond555#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond594 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond556#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond133#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond506#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond264#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond595 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond557#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond507#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond134#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond596 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond558#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond508#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond135#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond597 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond509#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond136#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond265#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond598 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond559#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 
1 +.+.: (wq_completion)bond599 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond600 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond510#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond266#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond560#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond267#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond601 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond511#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond268#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond602 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond561#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond512#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond269#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond603 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond562#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond270#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond604 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond137#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond138#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond271#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond605 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond139#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond513#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 
1 +.+.: (wq_completion)bond514#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond140#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond272#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond606 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond141#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond515#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond273#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond607 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond142#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond274#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond143#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond275#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond516#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond276#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond608 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond277#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond144#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond278#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond609 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond145#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond517#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond279#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond610 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond146#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond518#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond147#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond519#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond280#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond281#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond520#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond282#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond521#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond148#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond149#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond522#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond283#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond150#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond284#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond523#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond151#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond285#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond524#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond152#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond286#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond525#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond153#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond287#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond526#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond154#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond527#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond155#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond288#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond528#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond529#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond156#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond157#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond289#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond530#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond158#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond290#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond531#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond159#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond291#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond532#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond292#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond533#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond293#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond534#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond294#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond535#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond162#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond536#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond163#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond581#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond296#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond537#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond164#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond297#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond538#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond165#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond298#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond539#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond166#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond299#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond540#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond167#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond584#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond585#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond300#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond168#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond586#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond169#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond302#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond636 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond587#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond588#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond589#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond303#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond590#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond304#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond174#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond591#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond638 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond175#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond592#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond305#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond306#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond593#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond307#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond639 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond176#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond308#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond640 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond177#5 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond594#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond641 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond178#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond595#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond310#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond642 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond643 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond179#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond596#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond181#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond597#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond644 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond182#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond598#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond311#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond183#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond312#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond645 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond646 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond184#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond599#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond647 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 
+.+.: (wq_completion)bond600#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond313#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond648 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond649 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond601#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond315#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond650 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond186#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond602#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond651 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond187#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond652 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond188#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond603#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond316#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond653 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond654 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond563#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond189#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond604#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond655 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond564#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond190#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond605#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond317#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond656 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond606#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond191#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond565#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond657 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond607#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond566#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond193#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond658 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond567#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond608#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond194#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond568#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond609#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond610#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond320#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond569#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond321#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond570#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond571#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond611 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond323#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond195#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond572#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond612 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond324#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond196#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond573#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond659 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond325#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond574#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond197#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond660 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond613 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond575#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond326#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond198#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond661 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond576#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond662 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond577#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond327#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond199#5 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond663 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond614 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond578#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond200#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond615 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond579#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond580#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond328#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond201#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond664 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond616 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond581#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond665 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond582 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond617 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond666 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond618 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond583#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond619 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond584#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond329#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond667 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond668 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond202#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond330#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond621 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond669 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond203#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond585#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond331#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond670 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond332#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond586#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond622 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond671 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond587#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond333#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond623 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond672 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond588#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond589#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond334#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond624 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond335#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond673 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 
220 BD: 1 +.+.: (wq_completion)bond336#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond625 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond674 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond204#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond626 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond675 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond337#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond627 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond676 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond628 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond590#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond629 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond677 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond591#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond678 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond205#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond592#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond630 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond631 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond339#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond679 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond206#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond593#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 1 BD: 4447 ....: key#28 FD: 1 BD: 4410 +.+.: &vmpr->sr_lock FD: 220 BD: 1 +.+.: (wq_completion)bond680 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond207#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 4413 +.+.: f2fs_list_lock FD: 76 BD: 1 .+.+: &type->s_umount_key#49 ->&rq->__lock ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#24 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->inode_hash_lock ->&obj_hash[i].lock ->pool_lock#2 ->&fsnotify_mark_srcu ->&wb->list_lock ->kernfs_idr_lock ->&cfs_rq->removed.lock FD: 220 BD: 1 +.+.: (wq_completion)bond594#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond632 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond340#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond681 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond208#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond209#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond682 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond633 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond341#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 31 BD: 1 ..-.: &(&krcp->page_cache_work)->timer FD: 220 BD: 1 +.+.: (wq_completion)bond210#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond595#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond634 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond342#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond683 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond211#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond596#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond635 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond597#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond636#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond343#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond684 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond637 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond344#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond685 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond212#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond213#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond598#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond638#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond639#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond345#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond686 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond214#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond599#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond346#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond687 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond215#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond216#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond600#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond640#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond347#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond348#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond688 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond217#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond601#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 3 BD: 4427 ..-.: lock#11 ->&lruvec->lru_lock FD: 220 BD: 1 +.+.: (wq_completion)bond641#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond689 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond602#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond218#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond642#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond690 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond603#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond219#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond643#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond691 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond220#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond644#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond352#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond221#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond692 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond222#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond693 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond605#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond223#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond645#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond353#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond694 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond224#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond354#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond695 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond355#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond356#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond696 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond606#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond225#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond647#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond357#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond697 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond648#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond226#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond227#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond698 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond607#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond649#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond358#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond699 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond608#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond650#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond228#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond651#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond359#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond609#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond229#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond700 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond610#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond652#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond360#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond701 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->rcu_node_0 ->&rcu_state.expedited_wq FD: 220 BD: 1 +.+.: (wq_completion)bond611#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond230#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond231#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond653#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond702 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond703 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond654#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond361#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond704 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond612#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond655#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond705 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond613#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond232#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond656#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond657#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond362#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond614#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond233#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond706 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond363#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond707 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond615#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond234#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond708 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond364#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond616#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond658#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond709 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond365#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond710 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond366#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond367#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond617#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond235#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond711 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond618#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond368#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond619#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond659#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond236#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond712 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond620 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond660#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond237#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond713 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond621#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond238#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond714 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond622#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond369#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond661#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond623#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond370#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond662#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond715 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond624#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond371#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond663#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond716 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond625#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond626#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond239#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond372#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond664#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond717 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond665#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond718 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond627#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond240#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond373#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond241#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond242#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond374#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond666#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond719 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond667#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond628#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond243#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond668#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond720 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond629#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond669#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond375#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond721 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond630#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond244#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond722 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond245#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond670#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond723 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond376#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond724 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond246#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond671#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond726 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond672#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond377#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond727 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond728 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond729 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond247#6 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond631#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond673#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond248#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond632#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond674#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond675#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond378#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond730 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond249#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond731 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond250#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond633#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond251#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond634#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond732 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond676#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond733 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond635#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond252#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond734 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 134 BD: 2 +.+.: &drbg->drbg_mutex ->&rq->__lock ->crypto_alg_sem ->fs_reclaim ->&c->lock ->pool_lock#2 ->crngs.lock ->&rng->jent_lock FD: 4 BD: 5 +.+.: &rng->jent_lock ->&obj_hash[i].lock ->pool_lock#2 FD: 220 BD: 1 +.+.: (wq_completion)bond636#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond637#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond253#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond677#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond735 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond736 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond638#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond254#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond678#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond639#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond255#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond640#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond256#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond679#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond380#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond641#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond680#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond381#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond642#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond737 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond681#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond382#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond643#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond738 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond682#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond257#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond644#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond383#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond258#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond645#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond739 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond683#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond384#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond259#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond646#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond740 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond684#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond260#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond741 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond685#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond261#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond385#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond647#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond742 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond648#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond686#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond262#6 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond386#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond649#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond743 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond650#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond387#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond744 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond264#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond745 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond388#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 65 BD: 1 .+.+: &type->s_umount_key#50 ->&rq->__lock ->&lru->node[i].lock ->&dentry->d_lock ->&sb->s_type->i_lock_key#30 ->&s->s_inode_list_lock ->&xa->xa_lock#8 ->inode_hash_lock ->&obj_hash[i].lock ->&fsnotify_mark_srcu FD: 220 BD: 1 +.+.: (wq_completion)bond746 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond687#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond265#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond651#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond389#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond747 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond390#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond748 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond749 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond688#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond266#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond652#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock 
->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond689#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond267#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond268#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond391#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond750 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond269#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond653#3 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond751 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond690#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond691#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond692#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond270#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond693#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond271#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond654#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond752 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond694#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond272#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond655#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond753 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond695#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond696#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond754 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond656#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond697#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond273#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond657#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond755 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond698#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond274#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond275#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond392#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond658#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond756 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond699#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond276#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond393#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond277#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond278#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond394#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond659#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond757 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond279#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond660#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond700#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond661#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond395#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond662#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond759 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond760 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond701#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond280#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond761 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond396#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond762 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond702#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond281#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond397#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond663#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond763 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond703#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond398#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond664#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond764 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond282#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond283#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond704#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond399#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond665#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond765 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond766 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond284#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond705#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond666#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond767 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond768 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond706#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond400#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond667#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond769 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond707#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond770 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond668#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond771 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond285#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond708#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond401#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond669#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond670#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond709#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond671#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond772 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond710#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond672#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond773 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond286#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond711#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond673#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond774 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond712#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond674#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond775 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond776 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond713#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond777 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond714#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond675#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond778 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond287#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond715#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond288#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond676#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond779 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond289#5 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond677#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond716#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond780 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond290#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond678#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond717#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond291#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond718#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond781 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond292#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond679#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond782 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond680#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond783 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond293#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond681#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond719#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond784 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond785 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond294#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond295#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond682#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond720#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond786 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond296#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond683#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond787 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond721#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond297#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond684#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond788 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond298#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond685#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond789 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond722#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond299#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond790 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond723#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond686#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond724#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond791 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond687#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond725 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond300#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond792 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond688#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond726#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond793 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond689#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond727#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond794 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond690#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond728#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond795 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond729#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond730#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond731#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond796 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond691#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond732#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond302#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond797 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond692#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond733#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond798 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond693#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond734#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond799 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond694#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond735#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond736#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond695#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond737#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond800 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond696#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond304#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond801 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond738#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond802 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond803 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond697#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond739#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond305#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond804 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond698#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond740#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond306#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond805 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond741#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond307#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond806 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond807 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond699#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond742#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond308#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond700#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond743#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond309#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond808 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond701#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond744#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond310#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond809 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond810 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond311#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond702#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond745#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond811 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond312#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond703#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond746#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond812 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond704#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond747#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond813 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond313#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond705#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond748#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond814 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond706#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond749#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond314#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond815 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond707#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond315#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond316#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond816 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond317#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond817 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond708#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond750#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond318#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond751#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond319#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond818 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond709#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond320#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond710#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond752#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond321#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond819 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond425#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond711#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond753#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond322#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond754#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond289#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond426#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond323#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond755#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond712#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond820 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond324#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond713#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond821 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond756#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond290#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond291#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond325#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond822 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond292#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond757#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond293#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond758 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond326#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond823 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond714#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond824 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond825 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond759#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond826 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond294#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond427#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond715#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond295#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond827 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond716#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond428#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond296#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond760#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond429#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond761#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond762#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond828 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond430#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond763#2 ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond764#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond829 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond717#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond431#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond765#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond830 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond718#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond432#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond766#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond767#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond831 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond719#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond433#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond768#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond720#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond832 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond434#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 
BD: 1 +.+.: (wq_completion)bond769#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond721#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond833 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond722#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond770#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond435#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond834 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond771#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond723#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond724#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond436#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond835 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond772#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond725#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond836 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond773#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond726#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond437#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond837 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond774#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond727#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond438#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond838 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond439#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond839 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond775#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond728#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond440#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond776#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond441#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond840 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond777#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond729#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond442#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond841 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 30 BD: 4 +.+.: port_mutex#2 ->&rq->__lock ->local_port_range_lock.seqcount ->&pnsocks.lock FD: 220 BD: 1 +.+.: (wq_completion)bond778#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 5 ....: local_port_range_lock.seqcount FD: 220 BD: 1 +.+.: (wq_completion)bond842 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond779#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond730#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond443#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond731#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond780#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond843 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond444#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond732#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond733#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond781#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond844 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond445#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond734#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond446#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond735#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond782#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond845 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond736#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond846 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond447#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond737#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond847 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond783#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond738#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond448#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond784#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond848 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond449#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond450#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond785#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond786#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond739#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond787#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond740#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond849 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond451#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond788#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond741#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond850 ->(work_completion)(&(&slave->notify_work)->work) ->&rq->__lock ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond452#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond789#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond851 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond790#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond453#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond742#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond852 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond791#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond454#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond743#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond853 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond792#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond455#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond744#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond854 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond456#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond745#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond457#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond746#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond855 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond856 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond793#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond857 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond794#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond458#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond747#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond858 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond795#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond796#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond748#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond859 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond797#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond460#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond749#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond860 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond798#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) 
->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond462#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond750#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond861 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond862 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond799#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond751#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond863 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond800#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond752#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond864 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond464#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond753#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond865 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond801#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond465#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond866 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond867 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond802#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond754#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond868 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond803#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond755#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond869 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond756#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond466#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond757#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond870 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond758#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond467#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond468#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond871 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond804#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond759#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond805#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond806#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond469#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond872 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond807#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond873 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond808#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond761#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond874 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond809#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond762#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond763#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond810#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond875 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond811#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond764#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond876 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond812#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond765#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond877 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond813#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond767#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond878 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond814#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond768#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond879 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond769#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond815#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond880 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond476#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond770#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond816#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond881 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond477#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond771#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond817#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond882 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond772#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond883 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond478#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond818#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond884 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond479#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond773#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond819#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond885 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond820#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond480#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond821#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond774#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond887 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond481#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond822#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond775#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond482#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond823#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond483#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond824#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: 
(wq_completion)bond776#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond825#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond777#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond888 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond484#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond485#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond826#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond778#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond827#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond779#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond486#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond828#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond889 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond829#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond830#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond780#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond890 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond831#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond781#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond832#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond782#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond891 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond833#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond892 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond834#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond893 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond835#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond783#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond894 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond836#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond784#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond895 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond837#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond785#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond896 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond786#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond897 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond838#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond787#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond839#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond898 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond788#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond840#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond899 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond789#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond900 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond488#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond790#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond841#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond791#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond792#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond489#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond793#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond842#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond901 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond843#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond902 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond794#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond844#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond903 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond795#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond845#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond904 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond796#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond490#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond846#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond905 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond797#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond847#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond906 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond491#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond798#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond848#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond907 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond492#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond799#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond849#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond908 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond800#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond909 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond801#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond910 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond850#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond802#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond911 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond851#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond803#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond804#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond852#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond853#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond912 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond913 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond805#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond854#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond914 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond806#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond855#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond915 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond916 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond493#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond856#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond917 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond807#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond857#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond918 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond808#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond858#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond919 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond809#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond494#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond859#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond810#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond920 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond860#2 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond811#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond921 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond922 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond861#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond862#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond812#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond863#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond813#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond923 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond864#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond814#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond924 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond865#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond495#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond815#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond866#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond816#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond817#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond925 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond926 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond867#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond818#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: 
(wq_completion)bond868#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond819#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond927 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond869#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond820#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond928 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond870#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond929 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond871#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond821#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond872#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond822#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond930 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond873#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond823#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond931 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond874#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond824#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond932 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond875#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond825#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond933 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond327#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 
220 BD: 1 +.+.: (wq_completion)bond934 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond935 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond297#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond826#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond827#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond328#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond936 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond329#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond298#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond876#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond877#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond828#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond937 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond878#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond879#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond880#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond829#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond330#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond881#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond938 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond830#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond831#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond499#4 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond939 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond882#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond940 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond299#6 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond300#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond331#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond883#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond301#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond832#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond302#6 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond884#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond332#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond333#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond941 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond885#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond833#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond500#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->rcu_node_0 ->&rcu_state.expedited_wq FD: 220 BD: 1 +.+.: (wq_completion)bond886 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 1 BD: 2 ..-.: key#29 FD: 220 BD: 1 +.+.: (wq_completion)bond887#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond834#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond501#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond942 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond888#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond835#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond943 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond836#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond944 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond889#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond945 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond334#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond946 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond335#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond837#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond947 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond890#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond336#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond838#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond839#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond891#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond840#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond892#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond841#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond893#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond948 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond894#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond949 
->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond337#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond502#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond842#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond843#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond895#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond844#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond896#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond950 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond845#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond951 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond897#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond952 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond953 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond847#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond898#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond848#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond899#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond954 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond849#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond900#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond850#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond955 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond901#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond956 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond851#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond902#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond957 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond852#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond853#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond903#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond904#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond959 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond854#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond905#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond960 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond855#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond961 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond856#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond906#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond907#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond857#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond962 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond505#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond908#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond858#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond963 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond964 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond859#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond909#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond338#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond506#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond965 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond860#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond910#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond339#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond966 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond861#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond911#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond340#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond967 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond862#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond912#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond341#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond507#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond968 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond863#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond913#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond342#5 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond969 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond864#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond914#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond865#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond915#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock ->&cfs_rq->removed.lock ->&obj_hash[i].lock FD: 220 BD: 1 +.+.: (wq_completion)bond866#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond916#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond970 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond343#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond971 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond917#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond868#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond972 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond918#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond344#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond869#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 1 BD: 3 ....: &xa->xa_lock#20 FD: 220 BD: 1 +.+.: (wq_completion)bond973 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond919#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond870#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond974 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond920#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond975 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond872#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond921#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond976 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond873#3 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond511#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond922#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond977 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond874#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond923#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond978 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond875#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond924#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond979 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond876#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond925#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond980 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond877#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond926#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond981 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond878#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond982 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond879#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond928#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond983 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond880#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond929#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond984 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond881#3 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond930#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond985 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond345#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond512#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond931#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond986 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond883#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond346#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond513#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond932#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond987 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond884#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond347#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond514#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond933#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond348#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond934#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond988 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond885#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond349#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond935#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond989 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond886#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond350#3 ->&rq->__lock 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond936#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond990 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond887#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond937#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond991 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond888#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond938#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond992 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond889#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond939#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond993 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond890#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond940#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond994 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond891#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond941#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond351#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond995 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond942#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond352#5 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond996 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond892#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond943#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond353#5 
->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond997 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond893#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond944#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond998 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond894#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond999 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond946#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond895#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1000 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond896#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond947#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond521#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1001 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond948#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond897#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond1002 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond949#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond898#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond1003 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond522#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond950#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond899#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond1004 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond951#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond900#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1005 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond952#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond901#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1006 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond953#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond902#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond954#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1007 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond955#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond903#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1008 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond523#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond956#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1009 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond957#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond904#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond524#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1010 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond958 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond905#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1011 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond959#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond906#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1012 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond960#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond907#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1013 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond961#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond908#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1014 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond962#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond909#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond963#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond910#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1016 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond964#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond911#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1017 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond965#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond912#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1018 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond966#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond913#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond526#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1019 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond967#2 ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond914#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1020 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond968#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond915#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond527#4 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond916#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond1021 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond969#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1022 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond970#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond917#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond918#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1023 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond971#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond919#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1024 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond530#4 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond972#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond920#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1025 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond973#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond921#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1026 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond974#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) 
->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond922#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) ->&rq->__lock FD: 220 BD: 1 +.+.: (wq_completion)bond1027 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond975#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond923#3 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1028 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond976#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond924#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1029 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond977#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond925#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1030 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond978#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond926#3 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1031 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond979#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond927#2 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 220 BD: 1 +.+.: (wq_completion)bond1032 ->(work_completion)(&(&slave->notify_work)->work) ->(work_completion)(&(&bond->mcast_work)->work) FD: 152 BD: 1 +.+.: (wq_completion)bond980#2 ->&rq->__lock ->(work_completion)(&(&slave->notify_work)->work) (buggered) all lock chains: irq_context: 0 &obj_hash[i].lock irq_context: 0 &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex irq_context: 0 (console_sem).lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 console_mutex irq_context: 0 console_mutex syslog_lock irq_context: 0 console_mutex (console_sem).lock irq_context: 0 console_mutex console_lock console_srcu console_owner_lock irq_context: 0 console_mutex console_lock console_srcu console_owner irq_context: 0 console_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_lock console_srcu console_owner_lock irq_context: 0 console_lock console_srcu console_owner irq_context: 0 console_lock console_srcu console_owner console_owner_lock irq_context: 0 input_pool.lock 
irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 clocksource_mutex irq_context: 0 clocksource_mutex watchdog_lock irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 resource_lock irq_context: 0 cache_disable_lock irq_context: 0 pgd_lock irq_context: 0 init_mm.page_table_lock irq_context: 0 init_mm.page_table_lock pgd_lock irq_context: 0 early_pfn_lock irq_context: 0 acpi_ioapic_lock irq_context: 0 acpi_ioapic_lock ioapic_lock irq_context: 0 acpi_ioapic_lock (console_sem).lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner irq_context: 0 acpi_ioapic_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 syscore_ops_lock irq_context: 0 map_entries_lock irq_context: 0 devtree_lock irq_context: 0 pcpu_lock irq_context: 0 param_lock irq_context: 0 base_crng.lock irq_context: 0 crng_init_wait.lock irq_context: 0 zonelist_update_seq irq_context: 0 zonelist_update_seq zonelist_update_seq.seqcount irq_context: 0 dmar_global_lock irq_context: 0 &zone->lock irq_context: 0 &zone->lock &____s->seqcount irq_context: 0 &pcp->lock &zone->lock irq_context: 0 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &____s->seqcount irq_context: 0 pool_lock#2 irq_context: 0 pcpu_alloc_mutex irq_context: 0 pcpu_alloc_mutex pcpu_lock irq_context: 0 &n->list_lock irq_context: 0 &c->lock irq_context: 0 slab_mutex irq_context: 0 slab_mutex pool_lock#2 irq_context: 0 slab_mutex &c->lock irq_context: 0 slab_mutex &n->list_lock irq_context: 0 slab_mutex pcpu_alloc_mutex irq_context: 0 slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 batched_entropy_u64.lock irq_context: 0 batched_entropy_u64.lock crngs.lock irq_context: 0 batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 espfix_init_mutex irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock irq_context: 0 espfix_init_mutex &zone->lock irq_context: 0 espfix_init_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 espfix_init_mutex &____s->seqcount irq_context: 0 espfix_init_mutex pool_lock#2 irq_context: 0 percpu_counters_lock irq_context: 0 &mm->page_table_lock irq_context: 0 ptlock_ptr(page) irq_context: 0 ptlock_ptr(page)#2 irq_context: 0 trace_types_lock irq_context: 0 panic_notifier_list.lock irq_context: 0 die_chain.lock irq_context: 0 trace_event_sem irq_context: 0 batched_entropy_u32.lock irq_context: 0 batched_entropy_u32.lock crngs.lock irq_context: 0 &rq->__lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 init_task.pi_lock irq_context: 0 init_task.pi_lock &rq->__lock irq_context: 0 init_task.vtime_seqcount irq_context: 0 slab_mutex &pcp->lock &zone->lock irq_context: 0 slab_mutex &zone->lock irq_context: 0 slab_mutex &____s->seqcount irq_context: 0 wq_pool_mutex irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 wq_pool_mutex &zone->lock irq_context: 0 wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 wq_pool_mutex &____s->seqcount irq_context: 0 wq_pool_mutex pool_lock#2 irq_context: 0 wq_pool_mutex &c->lock irq_context: 0 &wq->mutex irq_context: 0 &wq->mutex &pool->lock irq_context: 0 wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &zone->lock 
irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 shrinker_rwsem irq_context: 0 rcu_node_0 irq_context: 0 rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_lock rcu_node_0 irq_context: 0 &rnp->exp_poll_lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock irq_context: 0 &rnp->exp_poll_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &zone->lock irq_context: 0 trace_event_sem trace_event_ida.xa_lock &____s->seqcount irq_context: 0 trace_event_sem trace_event_ida.xa_lock pool_lock#2 irq_context: 0 trace_event_sem trace_event_ida.xa_lock &c->lock irq_context: 0 trigger_cmd_mutex irq_context: 0 i8259A_lock irq_context: 0 rcu_read_lock pool_lock#2 irq_context: 0 irq_domain_mutex irq_context: 0 free_vmap_area_lock irq_context: 0 vmap_area_lock irq_context: 0 &irq_desc_lock_class irq_context: 0 vmap_purge_lock irq_context: 0 vmap_purge_lock purge_vmap_area_lock irq_context: 0 cpa_lock irq_context: 0 cpa_lock pgd_lock irq_context: 0 timekeeper_lock irq_context: 0 timekeeper_lock tk_core.seq.seqcount irq_context: 0 timekeeper_lock tk_core.seq.seqcount &obj_hash[i].lock irq_context: 0 tk_core.seq.seqcount irq_context: 0 &base->lock irq_context: 0 &base->lock &obj_hash[i].lock irq_context: 0 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 rcu_read_lock &pool->lock/1 irq_context: 0 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock irq_context: 0 pmus_lock pcpu_alloc_mutex irq_context: 0 pmus_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 pmus_lock pool_lock#2 irq_context: 0 pmus_lock &obj_hash[i].lock irq_context: 0 &swhash->hlist_mutex irq_context: 0 pmus_lock &cpuctx_mutex irq_context: 0 tty_ldiscs_lock irq_context: 0 console_lock irq_context: 0 console_lock resource_lock irq_context: 0 console_lock pool_lock#2 irq_context: 0 console_lock &obj_hash[i].lock irq_context: 0 console_lock &pcp->lock &zone->lock irq_context: 0 console_lock &zone->lock irq_context: 0 console_lock &____s->seqcount irq_context: 0 console_lock &c->lock irq_context: 0 console_lock kbd_event_lock irq_context: 0 console_lock kbd_event_lock led_lock irq_context: 0 console_lock vga_lock irq_context: 0 console_lock (console_sem).lock irq_context: 0 console_lock console_owner_lock irq_context: 0 console_mutex &port_lock_key irq_context: 0 console_mutex console_lock irq_context: 0 console_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_mutex console_srcu_srcu_usage.lock irq_context: 0 console_mutex console_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 console_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 console_mutex console_srcu irq_context: 0 console_lock console_srcu console_owner &port_lock_key irq_context: 0 init_task.alloc_lock irq_context: 0 acpi_ioremap_lock irq_context: 0 acpi_ioremap_lock pool_lock#2 irq_context: 0 acpi_ioremap_lock resource_lock irq_context: 0 acpi_ioremap_lock memtype_lock irq_context: 0 acpi_ioremap_lock 
free_vmap_area_lock irq_context: 0 acpi_ioremap_lock vmap_area_lock irq_context: 0 semaphore->lock irq_context: 0 *(&acpi_gbl_reference_count_lock) irq_context: 0 clockevents_lock irq_context: 0 clockevents_lock tk_core.seq.seqcount irq_context: 0 clockevents_lock tick_broadcast_lock irq_context: 0 clockevents_lock i8253_lock irq_context: 0 &desc->request_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class irq_context: 0 &desc->request_mutex &irq_desc_lock_class i8259A_lock irq_context: 0 ioapic_lock irq_context: 0 ioapic_mutex irq_context: 0 ioapic_mutex &domain->mutex irq_context: 0 ioapic_mutex &domain->mutex pool_lock#2 irq_context: 0 ioapic_mutex &domain->mutex vector_lock irq_context: 0 ioapic_mutex &domain->mutex &irq_desc_lock_class irq_context: 0 ioapic_mutex &domain->mutex i8259A_lock irq_context: 0 ioapic_mutex &domain->mutex &c->lock irq_context: 0 ioapic_mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &zone->lock irq_context: 0 ioapic_mutex &domain->mutex &____s->seqcount irq_context: 0 vector_lock irq_context: 0 &pool->lock#2 irq_context: hardirq jiffies_lock irq_context: hardirq jiffies_lock jiffies_seq.seqcount irq_context: hardirq hrtimer_bases.lock irq_context: hardirq hrtimer_bases.lock tk_core.seq.seqcount irq_context: hardirq log_wait.lock irq_context: 0 spec_ctrl_mutex irq_context: 0 spec_ctrl_mutex cpu_hotplug_lock irq_context: softirq drivers/char/random.c:1010 irq_context: softirq drivers/char/random.c:1010 input_pool.lock irq_context: 0 slab_mutex rcu_read_lock pool_lock#2 irq_context: 0 slab_mutex &obj_hash[i].lock irq_context: 0 sysctl_lock irq_context: 0 tomoyo_policy_lock irq_context: 0 tomoyo_policy_lock pool_lock#2 irq_context: 0 aa_secids.xa_lock irq_context: 0 aa_secids.xa_lock pool_lock#2 irq_context: 0 aa_buffers_lock irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex irq_context: 0 rtnl_mutex &c->lock irq_context: 0 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &zone->lock irq_context: 0 rtnl_mutex &____s->seqcount irq_context: 0 rtnl_mutex pool_lock#2 irq_context: 0 lock irq_context: 0 lock kernfs_idr_lock irq_context: 0 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem irq_context: 0 file_systems_lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_lock irq_context: 0 &type->s_umount_key/1 irq_context: 0 &type->s_umount_key/1 pool_lock#2 irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key/1 shrinker_rwsem irq_context: 0 &type->s_umount_key/1 shrinker_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key/1 list_lrus_mutex irq_context: 0 &type->s_umount_key/1 sb_lock irq_context: 0 &type->s_umount_key/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key/1 percpu_counters_lock irq_context: 0 &type->s_umount_key/1 crngs.lock irq_context: 0 &type->s_umount_key/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key/1 &zone->lock irq_context: 0 &type->s_umount_key/1 &____s->seqcount irq_context: 0 &type->s_umount_key/1 &c->lock irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key irq_context: 0 
&type->s_umount_key/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &type->s_umount_key/1 &dentry->d_lock irq_context: 0 mnt_id_ida.xa_lock irq_context: 0 &dentry->d_lock irq_context: 0 mount_lock irq_context: 0 mount_lock mount_lock.seqcount irq_context: 0 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 irq_context: 0 &type->s_umount_key#2/1 pool_lock#2 irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#2/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#2/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#2/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#2/1 sb_lock irq_context: 0 &type->s_umount_key#2/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#2/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &____s->seqcount irq_context: 0 &type->s_umount_key#2/1 &c->lock irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 irq_context: 0 &type->s_umount_key#2/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#2/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#2/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 &type->s_umount_key#2/1 &dentry->d_lock irq_context: 0 ucounts_lock irq_context: 0 proc_inum_ida.xa_lock irq_context: 0 init_fs.lock irq_context: 0 init_fs.lock init_fs.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 irq_context: 0 &type->s_umount_key#3/1 pool_lock#2 irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#3/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#3/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#3/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#3/1 sb_lock irq_context: 0 &type->s_umount_key#3/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#3/1 &zone->lock irq_context: 0 &type->s_umount_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &____s->seqcount irq_context: 0 &type->s_umount_key#3/1 &c->lock irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#3/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#3/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#3/1 &sb->s_type->i_lock_key#3 &dentry->d_lock irq_context: 0 &type->s_umount_key#3/1 &dentry->d_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 proc_subdir_lock irq_context: 0 proc_subdir_lock irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 &type->s_umount_key#4/1 irq_context: 0 &type->s_umount_key#4/1 pool_lock#2 irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#4/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#4/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#4/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#4/1 sb_lock irq_context: 0 &type->s_umount_key#4/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 
&type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 irq_context: 0 &type->s_umount_key#4/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#4/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#4/1 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 &type->s_umount_key#4/1 &dentry->d_lock irq_context: 0 cgroup_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex pool_lock#2 irq_context: 0 cgroup_mutex lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cgroup_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex cgroup_file_kn_lock irq_context: 0 cgroup_mutex css_set_lock irq_context: 0 lock cgroup_idr_lock irq_context: 0 lock cgroup_idr_lock pool_lock#2 irq_context: 0 cpuset_mutex irq_context: 0 cpuset_mutex callback_lock irq_context: 0 cgroup_mutex &c->lock irq_context: 0 cgroup_mutex &____s->seqcount irq_context: 0 cgroup_mutex blkcg_pol_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &c->lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &pcp->lock &zone->lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &zone->lock irq_context: 0 cgroup_mutex lock cgroup_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cgroup_mutex lock cgroup_idr_lock &____s->seqcount irq_context: 0 cgroup_mutex lock cgroup_idr_lock pool_lock#2 irq_context: 0 cgroup_mutex &n->list_lock irq_context: 0 cgroup_mutex &pcp->lock &zone->lock irq_context: 0 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cgroup_mutex percpu_counters_lock irq_context: 0 cgroup_mutex shrinker_rwsem irq_context: 0 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex &base->lock irq_context: 0 cgroup_mutex &base->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock irq_context: 0 cgroup_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 cgroup_mutex devcgroup_mutex irq_context: 0 cgroup_mutex &zone->lock irq_context: 0 cgroup_mutex cpu_hotplug_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 init_sighand.siglock irq_context: 0 init_files.file_lock irq_context: 0 lock pidmap_lock irq_context: 0 lock pidmap_lock pool_lock#2 irq_context: 0 pidmap_lock irq_context: 0 cgroup_threadgroup_rwsem irq_context: 0 cgroup_threadgroup_rwsem css_set_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem tk_core.seq.seqcount 
irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_task.pi_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock init_sighand.siglock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock irq_context: 0 &p->pi_lock &rq->__lock irq_context: 0 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock irq_context: 0 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &p->pi_lock irq_context: 0 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (kthreadd_done).wait.lock irq_context: 0 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock irq_context: 0 &p->alloc_lock irq_context: 0 &p->alloc_lock &____s->seqcount#2 irq_context: 0 fs_reclaim irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 wq_pool_mutex &pool->lock/1 irq_context: 0 wq_pool_mutex fs_reclaim irq_context: 0 wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 wq_pool_mutex kthread_create_lock irq_context: 0 wq_pool_mutex &p->pi_lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_mutex &rq->__lock irq_context: 0 kthread_create_lock irq_context: 0 cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 wq_pool_mutex &x->wait irq_context: 0 wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait irq_context: 0 &x->wait &p->pi_lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_mutex &obj_hash[i].lock irq_context: 0 wq_pool_attach_mutex irq_context: 0 wq_mayday_lock irq_context: 0 &xa->xa_lock irq_context: 0 &pool->lock irq_context: 0 &pool->lock &p->pi_lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&pool->mayday_timer) irq_context: 0 &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rnp->exp_poll_wq) &rnp->exp_poll_lock irq_context: 0 &pool->lock/1 irq_context: 0 &pool->lock/1 &p->pi_lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (&wq_watchdog_timer) irq_context: 0 &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) allocation_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock irq_context: hardirq allocation_wait.lock &p->pi_lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 batched_entropy_u8.lock irq_context: 0 kfence_freelist_lock irq_context: 0 rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.cbs_gbl_lock rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.cbs_gbl_lock rcu_tasks__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock rcu_tasks_trace__percpu.cbs_pcpu_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.cbs_gbl_lock &ACCESS_PRIVATE(rtpcp, lock) &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks.cbs_gbl_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_tasks__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 rcu_tasks.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, 
lock) irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#3 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace.cbs_gbl_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (null) irq_context: 0 (null) tk_core.seq.seqcount irq_context: softirq &(&ssp->srcu_sup->work)->timer irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tasks_rcu_exit_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)rcu_gp 
(work_completion)(&sdp->work) &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex kernel/rcu/tasks.h:147 irq_context: softirq &(&kfence_timer)->timer irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&timer.timer) irq_context: softirq (&timer.timer) &p->pi_lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock rcu_tasks_trace__percpu.cbs_pcpu_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex cpu_hotplug_lock &ACCESS_PRIVATE(rtpcp, lock) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &x->wait#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nmi_desc[0].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock smpboot_threads_lock batched_entropy_u8.lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kfence_freelist_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock 
smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &c->lock irq_context: 0 cpu_hotplug_lock smpboot_threads_lock &n->list_lock irq_context: 0 &rcu_state.gp_wq irq_context: 0 &stop_pi_lock irq_context: 0 &stop_pi_lock &rq->__lock irq_context: 0 &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &stopper->lock irq_context: 0 (module_notify_list).rwsem irq_context: 0 ddebug_lock irq_context: 0 &pmus_srcu irq_context: 0 watchdog_mutex irq_context: 0 watchdog_mutex cpu_hotplug_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 watchdog_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 watchdog_mutex cpu_hotplug_lock &x->wait#4 irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 watchdog_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#5 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&sscs.work) &x->wait#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &rcu_state.gp_wq &p->pi_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &obj_hash[i].lock irq_context: softirq rcu_callback pool_lock#2 irq_context: 0 &newf->file_lock irq_context: 0 init_fs.lock &dentry->d_lock irq_context: 0 &p->vtime.seqcount irq_context: 0 cpu_hotplug_lock mem_hotplug_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex 
rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.waiters.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_hotplug_lock mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.waiters.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock irq_context: 0 cpu_add_remove_lock cpuset_hotplug_work irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock smpboot_threads_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &swhash->hlist_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pmus_lock &cpuctx_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcp_batch_high_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &xa->xa_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 
cpu_add_remove_lock cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock wq_pool_attach_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock relay_channels_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rcu_node_0 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &c->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &n->list_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &pcp->lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &zone->lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &____s->seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock free_vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock vmap_area_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock init_mm.page_table_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock (console_sem).lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock rtc_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock tk_core.seq.seqcount irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock rtc_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &x->wait#6 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rq->__lock &rq->__lock/1 irq_context: 0 &rq->__lock/1 irq_context: 0 &x->wait#6 irq_context: 0 &x->wait#6 &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock sparse_irq_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &x->wait#6 irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up irq_context: 0 
cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up smpboot_threads_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up sparse_irq_lock &irq_desc_lock_class irq_context: 0 cpu_hotplug_lock cpuhp_state-up &swhash->hlist_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up pmus_lock &cpuctx_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#5 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &x->wait#7 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex wq_pool_attach_mutex &pool->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_node_0 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock rcu_read_lock &cfs_b->lock irq_context: hardirq &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex (console_sem).lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_add_remove_lock spec_ctrl_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 
rcu_tasks_trace.tasks_gp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex &base->lock &obj_hash[i].lock irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cpu_add_remove_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_hotplug_lock stop_cpus_mutex irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 &x->wait#8 irq_context: 0 cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 sched_domains_mutex irq_context: 0 sched_domains_mutex fs_reclaim irq_context: 0 sched_domains_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sched_domains_mutex pool_lock#2 irq_context: 0 sched_domains_mutex &obj_hash[i].lock irq_context: 0 sched_domains_mutex pcpu_alloc_mutex irq_context: 0 sched_domains_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sched_domains_mutex &pcp->lock &zone->lock irq_context: 0 sched_domains_mutex &zone->lock irq_context: 0 sched_domains_mutex &____s->seqcount irq_context: 0 sched_domains_mutex rcu_read_lock pool_lock#2 irq_context: 0 sched_domains_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sched_domains_mutex &c->lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &cp->lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock &rt_b->rt_runtime_lock &rt_rq->rt_runtime_lock irq_context: 0 sched_domains_mutex rcu_read_lock &rq->__lock rcu_read_lock &cfs_b->lock irq_context: 0 sched_domains_mutex pcpu_lock irq_context: 0 slab_mutex fs_reclaim irq_context: 0 slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (memory_chain).rwsem irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#5/1 irq_context: 0 &type->s_umount_key#5/1 fs_reclaim irq_context: 0 &type->s_umount_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 pool_lock#2 irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#5/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#5/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#5/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#5/1 sb_lock irq_context: 0 &type->s_umount_key#5/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#5/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#5/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#5/1 crngs.lock irq_context: 0 &type->s_umount_key#5/1 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->s_umount_key#5/1 
&s->s_inode_list_lock irq_context: 0 &type->s_umount_key#5/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#5/1 batched_entropy_u32.lock irq_context: 0 &type->s_umount_key#5/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->s_umount_key#5/1 &dentry->d_lock irq_context: 0 (setup_done).wait.lock irq_context: 0 namespace_sem irq_context: 0 namespace_sem fs_reclaim irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 namespace_sem &pcp->lock &zone->lock irq_context: 0 namespace_sem &zone->lock irq_context: 0 namespace_sem &____s->seqcount irq_context: 0 namespace_sem pool_lock#2 irq_context: 0 namespace_sem &c->lock irq_context: 0 namespace_sem mnt_id_ida.xa_lock irq_context: 0 namespace_sem pcpu_alloc_mutex irq_context: 0 namespace_sem pcpu_alloc_mutex pcpu_lock irq_context: 0 namespace_sem &dentry->d_lock irq_context: 0 namespace_sem mount_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &p->alloc_lock init_fs.lock irq_context: 0 rcu_read_lock &____s->seqcount#3 irq_context: 0 file_systems_lock irq_context: 0 &type->s_umount_key#6 irq_context: 0 &type->s_umount_key#6 fs_reclaim irq_context: 0 &type->s_umount_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#6 pool_lock#2 irq_context: 0 &type->s_umount_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#6 &zone->lock irq_context: 0 &type->s_umount_key#6 &____s->seqcount irq_context: 0 &type->s_umount_key#6 &c->lock irq_context: 0 &type->s_umount_key#6 &lru->node[i].lock irq_context: 0 &type->s_umount_key#6 &sbinfo->stat_lock irq_context: 0 &type->s_umount_key#6 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key namespace_sem irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key namespace_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 rcu_read_lock &____s->seqcount#4 irq_context: 0 &sb->s_type->i_lock_key#5 irq_context: 0 &fs->lock irq_context: 0 &fs->lock &____s->seqcount#3 irq_context: 0 (setup_done).wait.lock &p->pi_lock irq_context: 0 (setup_done).wait.lock &p->pi_lock 
&cfs_rq->removed.lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock irq_context: 0 (setup_done).wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 req_lock irq_context: 0 of_mutex irq_context: 0 of_mutex fs_reclaim irq_context: 0 of_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 of_mutex pool_lock#2 irq_context: 0 of_mutex lock irq_context: 0 of_mutex lock kernfs_idr_lock irq_context: 0 of_mutex &root->kernfs_rwsem irq_context: 0 of_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &x->wait#9 irq_context: 0 &k->list_lock irq_context: 0 bus_type_sem irq_context: 0 &root->kernfs_rwsem irq_context: 0 &dev->power.lock irq_context: 0 dpm_list_mtx irq_context: 0 uevent_sock_mutex irq_context: 0 running_helpers_waitq.lock irq_context: 0 sysfs_symlink_target_lock irq_context: 0 &k->k_lock irq_context: 0 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex &dev->power.lock irq_context: 0 subsys mutex irq_context: 0 memory_blocks.xa_lock irq_context: 0 memory_blocks.xa_lock &pcp->lock &zone->lock irq_context: 0 memory_blocks.xa_lock &zone->lock irq_context: 0 memory_blocks.xa_lock &____s->seqcount irq_context: 0 memory_blocks.xa_lock pool_lock#2 irq_context: 0 memory_blocks.xa_lock rcu_read_lock pool_lock#2 irq_context: 0 memory_blocks.xa_lock &obj_hash[i].lock irq_context: 0 memory_blocks.xa_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 lock kernfs_idr_lock &c->lock irq_context: 0 lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 lock kernfs_idr_lock &zone->lock irq_context: 0 lock kernfs_idr_lock &____s->seqcount irq_context: softirq (&timer.timer) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock irq_context: softirq rcu_callback mem_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock irq_context: softirq rcu_callback cpu_hotplug_lock.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (&timer.timer) irq_context: 0 rcu_tasks_trace.tasks_gp_mutex (console_sem).lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_tasks_trace.tasks_gp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 subsys mutex#2 irq_context: 0 register_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock fs_reclaim irq_context: 0 register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock irq_context: 0 register_lock proc_subdir_lock irq_context: 0 register_lock &c->lock irq_context: 0 register_lock &pcp->lock &zone->lock irq_context: 0 register_lock &zone->lock irq_context: 0 register_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock &pcp->lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &zone->lock irq_context: 0 register_lock proc_inum_ida.xa_lock &____s->seqcount irq_context: 0 register_lock proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 
register_lock proc_inum_ida.xa_lock &c->lock irq_context: 0 register_lock proc_inum_ida.xa_lock rcu_read_lock pool_lock#2 irq_context: 0 register_lock proc_inum_ida.xa_lock &obj_hash[i].lock irq_context: 0 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rq->__lock irq_context: 0 (pm_chain_head).rwsem irq_context: 0 cpufreq_governor_mutex irq_context: softirq rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &stopper->lock irq_context: 0 rcu_state.exp_mutex rcu_read_lock &stop_pi_lock irq_context: 0 &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dyn_event_ops_mutex irq_context: 0 binfmt_lock irq_context: 0 pin_fs_lock irq_context: 0 &type->s_umount_key#7/1 irq_context: 0 &type->s_umount_key#7/1 fs_reclaim irq_context: 0 &type->s_umount_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 pool_lock#2 irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#7/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 
&type->s_umount_key#7/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#7/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#7/1 sb_lock irq_context: 0 &type->s_umount_key#7/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#7/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &____s->seqcount irq_context: 0 &type->s_umount_key#7/1 &c->lock irq_context: 0 &type->s_umount_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 irq_context: 0 &type->s_umount_key#7/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#7/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#7/1 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 &type->s_umount_key#7/1 &dentry->d_lock irq_context: 0 rcu_read_lock mount_lock irq_context: 0 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 irq_context: 0 &sb->s_type->i_mutex_key#2 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 &sb->s_type->i_lock_key#6 &dentry->d_lock irq_context: 0 chrdevs_lock irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 irq_context: 0 &type->s_umount_key#8/1 fs_reclaim irq_context: 0 &type->s_umount_key#8/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 pool_lock#2 irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#8/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#8/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#8/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#8/1 sb_lock irq_context: 0 &type->s_umount_key#8/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#8/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 irq_context: 0 &type->s_umount_key#8/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#8/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#8/1 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &type->s_umount_key#8/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 
0 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 subsys mutex#3 irq_context: 0 async_lock irq_context: 0 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fwnode_link_lock irq_context: 0 regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pool_lock#2 irq_context: 0 rtc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &dev->devres_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_ww_class_mutex regulator_nesting_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex devtree_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#4 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex regulator_list_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_done.lock irq_context: 0 &type->s_umount_key#9/1 irq_context: 0 &type->s_umount_key#9/1 fs_reclaim irq_context: 0 &type->s_umount_key#9/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 pool_lock#2 irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#9/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#9/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#9/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#9/1 sb_lock irq_context: 0 &type->s_umount_key#9/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#9/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &____s->seqcount irq_context: 0 &type->s_umount_key#9/1 &c->lock irq_context: 0 &type->s_umount_key#9/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 irq_context: 0 &type->s_umount_key#9/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#9/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#9/1 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &type->s_umount_key#9/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pack_mutex irq_context: 0 pack_mutex fs_reclaim irq_context: 0 pack_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pack_mutex &pcp->lock &zone->lock irq_context: 0 pack_mutex &zone->lock irq_context: 0 pack_mutex &____s->seqcount irq_context: 0 pack_mutex pool_lock#2 irq_context: 0 pack_mutex free_vmap_area_lock irq_context: 0 pack_mutex vmap_area_lock irq_context: 0 pack_mutex init_mm.page_table_lock irq_context: 0 pack_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pack_mutex &rq->__lock irq_context: 0 pack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pack_mutex vmap_purge_lock irq_context: 0 pack_mutex vmap_purge_lock purge_vmap_area_lock irq_context: 0 pack_mutex cpa_lock irq_context: 0 pack_mutex cpa_lock pgd_lock irq_context: 0 text_mutex irq_context: 0 text_mutex ptlock_ptr(page)#2 irq_context: 0 &fp->aux->used_maps_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 proto_list_mutex irq_context: 0 targets_mutex irq_context: 0 nl_table_lock irq_context: 0 nl_table_wait.lock irq_context: 0 net_family_lock irq_context: 0 pernet_ops_rwsem 
net_generic_ids.xa_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 rtnl_mutex fs_reclaim irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock irq_context: 0 sparse_irq_lock fs_reclaim irq_context: 0 sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sparse_irq_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock irq_context: 0 sparse_irq_lock &root->kernfs_rwsem irq_context: 0 sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sparse_irq_lock &c->lock irq_context: 0 sparse_irq_lock &____s->seqcount irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &zone->lock irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 sparse_irq_lock lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &stopper->lock irq_context: 0 rcu_read_lock &stop_pi_lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 freezer_lock irq_context: 0 audit_backlog_wait.lock irq_context: 0 kauditd_wait.lock irq_context: 0 &list->lock irq_context: 0 kauditd_wait.lock &p->pi_lock irq_context: 0 kauditd_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#2 irq_context: 0 lock#2 &zone->lock irq_context: 0 pcp_batch_high_lock irq_context: 0 khugepaged_mutex irq_context: 0 gdp_mutex irq_context: 0 gdp_mutex &k->list_lock irq_context: 0 gdp_mutex fs_reclaim irq_context: 0 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 gdp_mutex pool_lock#2 irq_context: 0 gdp_mutex lock irq_context: 0 gdp_mutex lock kernfs_idr_lock irq_context: 0 gdp_mutex &root->kernfs_rwsem irq_context: 0 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 subsys mutex#5 irq_context: 0 subsys mutex#5 &k->k_lock irq_context: 0 subsys mutex#6 irq_context: 0 subsys mutex#6 &k->list_lock irq_context: 0 subsys mutex#6 &k->k_lock irq_context: 0 regmap_debugfs_early_lock irq_context: 0 (acpi_reconfig_chain).rwsem irq_context: 0 __i2c_board_lock irq_context: 0 core_lock irq_context: 0 core_lock &k->list_lock irq_context: 0 core_lock &k->k_lock irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 nl_table_lock irq_context: 0 thermal_governor_lock irq_context: 0 thermal_governor_lock thermal_list_lock irq_context: 0 cpuidle_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 
cpuidle_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpuidle_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cpuidle_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpuidle_lock &obj_hash[i].lock irq_context: 0 cpuidle_lock (console_sem).lock irq_context: 0 cpuidle_lock console_lock console_srcu console_owner_lock irq_context: 0 cpuidle_lock console_lock console_srcu console_owner irq_context: 0 cpuidle_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpuidle_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_lock_key#8 irq_context: 0 &dir->lock irq_context: 0 k-sk_lock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR k-slock-AF_QIPCRTR irq_context: 0 k-slock-AF_QIPCRTR irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim irq_context: 0 k-sk_lock-AF_QIPCRTR fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_ports.xa_lock irq_context: 0 k-sk_lock-AF_QIPCRTR pool_lock#2 irq_context: 0 k-sk_lock-AF_QIPCRTR qrtr_node_lock irq_context: 0 k-sk_lock-AF_QIPCRTR &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &zone->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rcu_state.expedited_wq irq_context: hardirq &rcu_state.expedited_wq irq_context: hardirq &rcu_state.expedited_wq &p->pi_lock irq_context: 0 
(wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 crngs.lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 iova_cache_mutex irq_context: 0 iova_cache_mutex cpu_hotplug_lock irq_context: 0 iova_cache_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 iova_cache_mutex slab_mutex irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim irq_context: 0 iova_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 iova_cache_mutex slab_mutex pool_lock#2 irq_context: 0 iova_cache_mutex slab_mutex &c->lock irq_context: 0 iova_cache_mutex slab_mutex &n->list_lock irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 iova_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 subsys mutex#7 irq_context: 0 subsys mutex#7 &k->k_lock irq_context: 0 pci_config_lock irq_context: 0 device_links_lock irq_context: 0 subsys mutex#8 irq_context: 0 dev_pm_qos_mtx irq_context: 0 dev_pm_qos_mtx fs_reclaim irq_context: 0 dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_mtx pool_lock#2 irq_context: 0 dev_pm_qos_mtx &dev->power.lock irq_context: 0 dev_pm_qos_mtx pm_qos_lock irq_context: 0 dev_pm_qos_sysfs_mtx irq_context: 0 dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 dev_pm_qos_sysfs_mtx lock irq_context: 0 dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 dev_pm_qos_sysfs_mtx &c->lock irq_context: 0 dev_pm_qos_sysfs_mtx &____s->seqcount irq_context: 0 mtrr_mutex irq_context: 0 mtrr_mutex fs_reclaim irq_context: 0 mtrr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtrr_mutex pool_lock#2 irq_context: 0 uidhash_lock irq_context: 0 &rq->__lock rcu_read_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex (console_sem).lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner 
irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_hotplug_lock wq_pool_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 detected_devices_mutex irq_context: 0 sb_writers#2 irq_context: 0 sb_writers#2 mount_lock irq_context: 0 oom_reaper_wait.lock irq_context: 0 subsys mutex#9 irq_context: 0 &pgdat->kcompactd_wait irq_context: 0 slab_mutex rcu_read_lock &pool->lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 slab_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slab_mutex &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &zone->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &c->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 memory_tier_lock irq_context: 0 memory_tier_lock fs_reclaim irq_context: 0 memory_tier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 memory_tier_lock pool_lock#2 irq_context: 0 memory_tier_lock &x->wait#9 irq_context: 0 memory_tier_lock &obj_hash[i].lock irq_context: 0 memory_tier_lock &k->list_lock irq_context: 0 memory_tier_lock lock irq_context: 0 memory_tier_lock lock kernfs_idr_lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 memory_tier_lock bus_type_sem irq_context: 0 memory_tier_lock sysfs_symlink_target_lock irq_context: 0 memory_tier_lock &pcp->lock &zone->lock irq_context: 0 memory_tier_lock &zone->lock irq_context: 0 memory_tier_lock &____s->seqcount irq_context: 0 memory_tier_lock rcu_read_lock pool_lock#2 irq_context: 0 memory_tier_lock &k->k_lock irq_context: 0 memory_tier_lock &root->kernfs_rwsem irq_context: 0 memory_tier_lock &c->lock irq_context: 0 memory_tier_lock &dev->power.lock irq_context: 0 memory_tier_lock dpm_list_mtx irq_context: 0 memory_tier_lock uevent_sock_mutex irq_context: 0 memory_tier_lock running_helpers_waitq.lock irq_context: 0 memory_tier_lock 
&dev->mutex &k->list_lock irq_context: 0 memory_tier_lock &dev->mutex &k->k_lock irq_context: 0 memory_tier_lock &dev->mutex &dev->power.lock irq_context: 0 memory_tier_lock subsys mutex#10 irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 memory_tier_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 memory_tier_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock irq_context: 0 khugepaged_mutex fs_reclaim irq_context: 0 khugepaged_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 khugepaged_mutex pool_lock#2 irq_context: 0 khugepaged_mutex kthread_create_lock irq_context: 0 khugepaged_mutex &p->pi_lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock irq_context: 0 khugepaged_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 khugepaged_mutex &x->wait irq_context: 0 khugepaged_mutex &rq->__lock irq_context: 0 khugepaged_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ksm_thread_mutex irq_context: 0 ksm_thread_wait.lock irq_context: 0 khugepaged_mutex &obj_hash[i].lock irq_context: 0 khugepaged_mutex lock#2 irq_context: 0 khugepaged_mutex lock#2 &zone->lock irq_context: 0 khugepaged_mutex pcp_batch_high_lock irq_context: 0 damon_ops_lock irq_context: 0 crypto_alg_sem irq_context: 0 crypto_alg_sem (crypto_chain).rwsem irq_context: 0 cpu_hotplug_lock fs_reclaim irq_context: 0 cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex irq_context: 0 cpu_hotplug_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock &wq->mutex irq_context: 0 cpu_hotplug_lock &wq->mutex &pool->lock irq_context: 0 cpu_hotplug_lock kthread_create_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock &x->wait irq_context: 0 cpu_hotplug_lock &rq->__lock irq_context: 0 cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 irq_context: 0 khugepaged_mm_lock irq_context: 0 khugepaged_wait.lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock 
irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex quarantine_lock irq_context: 0 cpu_hotplug_lock remove_cache_srcu irq_context: 0 cpu_hotplug_lock remove_cache_srcu quarantine_lock irq_context: 0 cpu_hotplug_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &wq->mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) wq_pool_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) pool_lock#2 irq_context: 0 bio_slab_lock irq_context: 0 bio_slab_lock fs_reclaim irq_context: 0 bio_slab_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex irq_context: 0 bio_slab_lock slab_mutex fs_reclaim irq_context: 0 bio_slab_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bio_slab_lock slab_mutex pool_lock#2 irq_context: 0 bio_slab_lock slab_mutex &c->lock irq_context: 0 bio_slab_lock slab_mutex &n->list_lock irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex irq_context: 0 bio_slab_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock pool_lock#2 irq_context: 0 major_names_lock irq_context: 0 major_names_lock fs_reclaim irq_context: 0 major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 major_names_lock pool_lock#2 irq_context: 0 major_names_lock major_names_spinlock irq_context: 0 console_lock fs_reclaim irq_context: 0 console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock &x->wait#9 irq_context: 0 console_lock &k->list_lock irq_context: 0 console_lock gdp_mutex irq_context: 0 console_lock gdp_mutex &k->list_lock irq_context: 0 console_lock gdp_mutex fs_reclaim irq_context: 0 console_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 console_lock gdp_mutex pool_lock#2 irq_context: 0 console_lock gdp_mutex lock irq_context: 0 console_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 console_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock lock irq_context: 0 console_lock lock kernfs_idr_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 console_lock bus_type_sem irq_context: 0 console_lock sysfs_symlink_target_lock irq_context: 0 console_lock &root->kernfs_rwsem irq_context: 0 console_lock &dev->power.lock irq_context: 0 console_lock dpm_list_mtx irq_context: 0 console_lock uevent_sock_mutex irq_context: 0 console_lock running_helpers_waitq.lock irq_context: 0 console_lock subsys mutex#11 irq_context: 0 console_lock subsys mutex#11 &k->k_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 rcu_tasks.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock irq_context: 0 
(wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) (&timer.timer) irq_context: 0 rcu_tasks.tasks_gp_mutex (console_sem).lock irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner_lock irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_tasks.tasks_gp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &meta->lock irq_context: 0 quarantine_lock irq_context: 0 *(&acpi_gbl_hardware_lock) irq_context: 0 *(&acpi_gbl_gpe_lock) irq_context: 0 acpi_ioapic_lock ioapic_mutex irq_context: 0 &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 shrink_qlist.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock irq_context: 0 remove_cache_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#3 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex remove_cache_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock irq_context: 0 cpu_hotplug_lock flush_lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock (work_completion)(&sfw->work) irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock (wq_completion)slub_flushwq irq_context: 0 cpu_hotplug_lock flush_lock &x->wait#10 irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &rq->__lock irq_context: 0 cpu_hotplug_lock flush_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 (wq_completion)slub_flushwq irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &c->lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &n->list_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&sfw->work) &obj_hash[i].lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)slub_flushwq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &n->list_lock &c->lock irq_context: 0 system_transition_mutex irq_context: 0 (power_off_prep_handler_list).rwsem irq_context: 0 power_off_handler_list.lock irq_context: 0 (restart_prep_handler_list).rwsem irq_context: 0 (reboot_notifier_list).rwsem irq_context: 0 *(&acpi_gbl_gpe_lock) (console_sem).lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner_lock irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner &port_lock_key irq_context: 0 *(&acpi_gbl_gpe_lock) console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock irq_context: 0 acpi_scan_lock semaphore->lock irq_context: 0 acpi_scan_lock fs_reclaim irq_context: 0 acpi_scan_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock pool_lock#2 irq_context: 0 acpi_scan_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &x->wait#9 irq_context: 0 acpi_scan_lock acpi_device_lock irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_device_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 acpi_scan_lock acpi_device_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock &xa->xa_lock#2 irq_context: 0 acpi_scan_lock acpi_device_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_device_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock &k->list_lock irq_context: 0 acpi_scan_lock lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock bus_type_sem irq_context: 0 acpi_scan_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &k->k_lock irq_context: 0 acpi_scan_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &dev->power.lock irq_context: 0 acpi_scan_lock dpm_list_mtx irq_context: 0 acpi_scan_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock &dev->mutex &k->k_lock irq_context: 0 acpi_scan_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock subsys mutex#12 irq_context: 0 acpi_scan_lock &____s->seqcount irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock &zone->lock irq_context: 0 acpi_scan_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock &c->lock irq_context: 0 acpi_scan_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock &n->list_lock irq_context: 0 acpi_scan_lock &n->list_lock &c->lock irq_context: 0 acpi_scan_lock &rq->__lock irq_context: 0 acpi_scan_lock batched_entropy_u8.lock irq_context: 0 acpi_scan_lock kfence_freelist_lock irq_context: 0 acpi_scan_lock &meta->lock irq_context: 0 acpi_scan_lock pci_config_lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock acpi_device_lock &c->lock irq_context: 0 acpi_scan_lock acpi_device_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_device_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_device_lock &____s->seqcount irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &c->lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &zone->lock irq_context: 0 acpi_scan_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 acpi_scan_lock quarantine_lock irq_context: 0 acpi_scan_lock (console_sem).lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_mmcfg_lock irq_context: 0 acpi_scan_lock resource_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim irq_context: 0 acpi_scan_lock &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock &device->physical_node_lock pool_lock#2 irq_context: 0 acpi_scan_lock &device->physical_node_lock lock irq_context: 0 acpi_scan_lock &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock 
&device->physical_node_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock fwnode_link_lock irq_context: 0 acpi_scan_lock fwnode_link_lock &k->k_lock irq_context: 0 acpi_scan_lock devtree_lock irq_context: 0 acpi_scan_lock gdp_mutex irq_context: 0 acpi_scan_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock gdp_mutex lock irq_context: 0 acpi_scan_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock subsys mutex#13 irq_context: 0 acpi_scan_lock subsys mutex#13 &k->k_lock irq_context: 0 acpi_scan_lock pci_bus_sem irq_context: 0 acpi_scan_lock pci_acpi_companion_lookup_sem irq_context: 0 acpi_scan_lock pci_slot_mutex irq_context: 0 acpi_scan_lock tk_core.seq.seqcount irq_context: 0 acpi_scan_lock resource_alignment_lock irq_context: 0 acpi_scan_lock device_links_srcu irq_context: 0 acpi_scan_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 acpi_scan_lock subsys mutex#14 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &x->wait#9 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &k->list_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock 
acpi_pm_notifier_lock gdp_mutex pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock bus_type_sem irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock sysfs_symlink_target_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock uevent_sock_mutex irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock running_helpers_waitq.lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock events_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &c->lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock &____s->seqcount irq_context: 0 &pgdat->kswapd_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock &obj_hash[i].lock pool_lock irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 acpi_scan_lock acpi_pm_notifier_install_lock acpi_pm_notifier_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &dev->power.lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->list_lock irq_context: 0 acpi_scan_lock pci_rescan_remove_lock &dev->mutex &k->k_lock irq_context: softirq drivers/char/random.c:251 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock 
&pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (next_reseed).work irq_context: 0 (wq_completion)events_unbound (next_reseed).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work input_pool.lock irq_context: 0 (wq_completion)events_unbound (next_reseed).work base_crng.lock irq_context: 0 acpi_scan_lock subsys mutex#3 irq_context: 0 acpi_scan_lock acpi_link_lock irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim irq_context: 0 acpi_scan_lock acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 acpi_scan_lock acpi_link_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock semaphore->lock irq_context: 0 acpi_scan_lock acpi_link_lock &obj_hash[i].lock irq_context: 0 acpi_scan_lock acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 acpi_scan_lock acpi_link_lock pci_config_lock irq_context: 0 acpi_scan_lock acpi_link_lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_link_lock (console_sem).lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 acpi_scan_lock acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 acpi_scan_lock acpi_link_lock &pcp->lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock &zone->lock irq_context: 0 acpi_scan_lock acpi_link_lock rcu_read_lock pool_lock#2 irq_context: 0 acpi_scan_lock acpi_link_lock &c->lock irq_context: 0 acpi_scan_lock acpi_link_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 acpi_scan_lock acpi_dep_list_lock irq_context: 0 acpi_scan_lock wakeup_ida.xa_lock irq_context: 0 acpi_scan_lock subsys mutex#15 irq_context: 0 acpi_scan_lock subsys mutex#15 &k->k_lock irq_context: 0 acpi_scan_lock events_lock irq_context: 0 acpi_scan_lock power_resource_list_lock irq_context: 0 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 fill_pool_map-wait-type-override pool_lock irq_context: 0 acpi_device_lock irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 k-sk_lock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 k-slock-AF_NETLINK irq_context: 0 &type->s_umount_key#10/1 irq_context: 0 &type->s_umount_key#10/1 fs_reclaim irq_context: 0 &type->s_umount_key#10/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 pool_lock#2 irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#10/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#10/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#10/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#10/1 sb_lock irq_context: 0 &type->s_umount_key#10/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#10/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#10/1 &zone->lock irq_context: 0 &type->s_umount_key#10/1 &____s->seqcount irq_context: 0 &type->s_umount_key#10/1 &c->lock irq_context: 0 
&type->s_umount_key#10/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 irq_context: 0 &type->s_umount_key#10/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#10/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#10/1 &sb->s_type->i_lock_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#10/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 irq_context: 0 &type->s_umount_key#11/1 fs_reclaim irq_context: 0 &type->s_umount_key#11/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 pool_lock#2 irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#11/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#11/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#11/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#11/1 sb_lock irq_context: 0 &type->s_umount_key#11/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#11/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 irq_context: 0 &type->s_umount_key#11/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#11/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#11/1 &sb->s_type->i_lock_key#10 &dentry->d_lock irq_context: 0 &type->s_umount_key#11/1 &dentry->d_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock reservation_ww_class_acquire reservation_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start dma_fence_map irq_context: 0 delayed_uprobe_lock irq_context: 0 key irq_context: 0 attribute_container_mutex irq_context: 0 triggers_list_lock irq_context: 0 leds_list_lock irq_context: 0 bus_type_sem irq_context: 0 (usb_notifier_list).rwsem irq_context: 0 &device->physical_node_lock irq_context: 0 rc_map_lock irq_context: 0 subsys mutex#16 irq_context: 0 fill_pool_map-wait-type-override &c->lock irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 fill_pool_map-wait-type-override &zone->lock irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 resource_lock irq_context: 0 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 free_vmap_area_lock pool_lock#2 irq_context: 0 &entry->access irq_context: 0 info_mutex irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex fs_reclaim irq_context: 0 info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 info_mutex pool_lock#2 irq_context: 0 info_mutex proc_inum_ida.xa_lock irq_context: 0 info_mutex proc_subdir_lock irq_context: 0 info_mutex &c->lock irq_context: 0 info_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 
irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: softirq mm/vmstat.c:2018 irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (shepherd).work irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (shepherd).work cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (shepherd).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock irq_context: 0 (wq_completion)events (shepherd).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = 
(typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex running_helpers_waitq.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock nl_table_lock irq_context: 0 rcu_read_lock nl_table_wait.lock irq_context: 0 qdisc_mod_lock irq_context: 0 bt_proto_lock irq_context: 0 hci_cb_list_lock irq_context: 0 mgmt_chan_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rate_ctrl_mutex irq_context: 0 rate_ctrl_mutex fs_reclaim irq_context: 0 rate_ctrl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rate_ctrl_mutex pool_lock#2 irq_context: 0 netlbl_domhsh_lock irq_context: 0 netlbl_unlhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock irq_context: 0 rcu_read_lock netlbl_domhsh_lock pool_lock#2 irq_context: 0 misc_mtx irq_context: 0 misc_mtx fs_reclaim irq_context: 0 misc_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx pool_lock#2 irq_context: 0 misc_mtx &x->wait#9 irq_context: 0 misc_mtx &obj_hash[i].lock irq_context: 0 misc_mtx &c->lock irq_context: 0 misc_mtx &pcp->lock &zone->lock irq_context: 0 misc_mtx &zone->lock irq_context: 0 misc_mtx &____s->seqcount irq_context: 0 misc_mtx &k->list_lock irq_context: 0 misc_mtx gdp_mutex irq_context: 0 misc_mtx gdp_mutex &k->list_lock irq_context: 0 misc_mtx gdp_mutex fs_reclaim irq_context: 0 misc_mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 misc_mtx gdp_mutex pool_lock#2 irq_context: 0 misc_mtx gdp_mutex lock irq_context: 0 misc_mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 misc_mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx lock irq_context: 0 misc_mtx lock kernfs_idr_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 misc_mtx bus_type_sem irq_context: 0 misc_mtx sysfs_symlink_target_lock irq_context: 0 misc_mtx &root->kernfs_rwsem irq_context: 0 misc_mtx &dev->power.lock irq_context: 0 misc_mtx dpm_list_mtx irq_context: 0 misc_mtx req_lock irq_context: 0 misc_mtx &p->pi_lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx &x->wait#11 irq_context: 0 misc_mtx &rq->__lock irq_context: 0 misc_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx rcu_read_lock &rq->__lock irq_context: 0 sb_writers irq_context: 0 misc_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers mount_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 
batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &x->wait#11 irq_context: 0 &x->wait#11 &p->pi_lock irq_context: 0 &x->wait#11 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx uevent_sock_mutex irq_context: 0 misc_mtx running_helpers_waitq.lock irq_context: 0 misc_mtx subsys mutex#18 irq_context: 0 misc_mtx subsys mutex#18 &k->k_lock irq_context: 0 rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex irq_context: 0 input_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex irq_context: 0 input_mutex input_devices_poll_wait.lock irq_context: 0 (netlink_chain).rwsem irq_context: 0 proto_tab_lock irq_context: 0 resource_lock pool_lock#2 irq_context: 0 resource_lock &obj_hash[i].lock irq_context: 0 random_ready_notifier.lock irq_context: 0 random_ready_notifier.lock crngs.lock irq_context: 0 misc_mtx misc_minors_ida.xa_lock irq_context: 0 vga_lock#2 irq_context: 0 vga_lock#2 pci_config_lock irq_context: 0 vga_lock#2 (console_sem).lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner_lock irq_context: 0 vga_lock#2 console_lock console_srcu console_owner irq_context: 0 vga_lock#2 console_lock console_srcu console_owner &port_lock_key irq_context: 0 vga_lock#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 disable_lock irq_context: 0 disable_lock fs_reclaim irq_context: 0 disable_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 disable_lock pool_lock#2 irq_context: 0 disable_lock &x->wait#9 irq_context: 0 disable_lock &obj_hash[i].lock irq_context: 0 disable_lock &k->list_lock irq_context: 0 disable_lock lock irq_context: 0 disable_lock lock kernfs_idr_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 disable_lock bus_type_sem irq_context: 0 disable_lock sysfs_symlink_target_lock irq_context: 0 disable_lock &k->k_lock irq_context: 0 disable_lock &root->kernfs_rwsem irq_context: 0 disable_lock &dev->power.lock irq_context: 0 disable_lock dpm_list_mtx irq_context: 0 disable_lock &(&priv->bus_notifier)->rwsem irq_context: 0 disable_lock uevent_sock_mutex irq_context: 0 disable_lock running_helpers_waitq.lock irq_context: 0 disable_lock &dev->mutex &dev->power.lock irq_context: 0 disable_lock &dev->mutex &k->list_lock irq_context: 0 disable_lock &dev->mutex &k->k_lock irq_context: 0 disable_lock subsys mutex#3 irq_context: 0 &type->s_umount_key#12/1 
irq_context: 0 &type->s_umount_key#12/1 fs_reclaim irq_context: 0 &type->s_umount_key#12/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 pool_lock#2 irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#12/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#12/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#12/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#12/1 sb_lock irq_context: 0 &type->s_umount_key#12/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#12/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 irq_context: 0 &type->s_umount_key#12/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#12/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#12/1 &sb->s_type->i_lock_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#12/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#11 irq_context: 0 clocksource_mutex cpu_hotplug_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stopper->lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &rq->__lock irq_context: 0 clocksource_mutex cpu_hotplug_lock stop_cpus_mutex &x->wait#8 irq_context: 0 clocksource_mutex (console_sem).lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner_lock irq_context: 0 clocksource_mutex console_lock console_srcu console_owner irq_context: 0 clocksource_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 clocksource_mutex console_lock console_srcu console_owner console_owner_lock irq_context: hardirq tick_broadcast_lock irq_context: hardirq tick_broadcast_lock jiffies_lock irq_context: hardirq hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 clocksource_mutex &rq->__lock irq_context: 0 (wq_completion)events timer_update_work irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events timer_update_work timer_keys_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#13/1 irq_context: 0 &type->s_umount_key#13/1 fs_reclaim irq_context: 0 sb_writers#2 
&sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 &type->s_umount_key#13/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#13/1 pool_lock#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 &type->s_umount_key#13/1 pcpu_alloc_mutex irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 &type->s_umount_key#13/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#13/1 shrinker_rwsem irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#13/1 list_lrus_mutex irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 &type->s_umount_key#13/1 sb_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 &type->s_umount_key#13/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 &type->s_umount_key#13/1 &c->lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#13/1 &____s->seqcount irq_context: 0 &type->s_umount_key#13/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &type->s_umount_key#13/1 &sb->s_type->i_lock_key#12 irq_context: 0 &type->s_umount_key#13/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#13/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#13/1 &sb->s_type->i_lock_key#12 &dentry->d_lock irq_context: 0 &type->s_umount_key#13/1 &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 tomoyo_ss &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#5 irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 irq_context: 0 &sb->s_type->i_mutex_key#5 rename_lock.seqcount irq_context: 0 tomoyo_ss &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#5 pool_lock#2 irq_context: 0 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq#2 irq_context: 0 &sb->s_type->i_mutex_key#5 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key#5 &s->s_inode_list_lock irq_context: 0 
&sb->s_type->i_mutex_key#5 tk_core.seq.seqcount irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#5 &sb->s_type->i_lock_key#12 &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 tomoyo_ss file_systems_lock irq_context: 0 tomoyo_ss fs_reclaim irq_context: 0 tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 &disk->open_mutex bdev_lock irq_context: 0 &sb->s_type->i_mutex_key#5 &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex bdev_lock &bdev->bd_holder_lock irq_context: 0 &sb->s_type->i_mutex_key#5 &zone->lock irq_context: 0 &bdev->bd_fsfreeze_mutex irq_context: 0 &sb->s_type->i_mutex_key#5 &____s->seqcount irq_context: 0 &bdev->bd_fsfreeze_mutex sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#5 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#5 &obj_hash[i].lock irq_context: 0 &bdev->bd_fsfreeze_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim irq_context: 0 &type->s_umount_key#15/1 irq_context: 0 &type->s_umount_key#15/1 fs_reclaim irq_context: 0 &type->s_umount_key#15/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#15/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#15/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#15/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#15/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#25/1 sb_lock irq_context: 0 &type->s_umount_key#25/1 irq_context: 0 &type->s_umount_key#15/1 sb_lock irq_context: 0 &type->s_umount_key#15/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#25/1 fs_reclaim irq_context: 0 &type->s_umount_key#25/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#15/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#13 irq_context: 0 &type->s_umount_key#25/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#15/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#25/1 &wq->mutex irq_context: 0 &type->s_umount_key#15/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#25/1 &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#15/1 &sb->s_type->i_lock_key#13 &dentry->d_lock irq_context: 
0 &type->s_umount_key#25/1 &c->lock irq_context: 0 &type->s_umount_key#15/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#25/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#25/1 &zone->lock irq_context: 0 &type->s_umount_key#25/1 &____s->seqcount irq_context: 0 &type->s_umount_key#25/1 kthread_create_lock irq_context: 0 &type->s_umount_key#25/1 &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 &x->wait irq_context: 0 &type->s_umount_key#25/1 &rq->__lock irq_context: 0 &type->s_umount_key#25/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#25/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#25/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#25/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#25/1 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#16/1 irq_context: 0 &type->s_umount_key#16/1 fs_reclaim irq_context: 0 &type->s_umount_key#16/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#16/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#16/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#16/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#16/1 sb_lock irq_context: 0 &type->s_umount_key#16/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#16/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#14 irq_context: 0 &type->s_umount_key#16/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#16/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#16/1 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &type->s_umount_key#25/1 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &type->s_umount_key#16/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#25/1 lock#4 irq_context: 0 &type->s_umount_key#25/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#25/1 tk_core.seq.seqcount irq_context: 0 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#17/1 irq_context: 0 &type->s_umount_key#17/1 fs_reclaim irq_context: 0 &type->s_umount_key#17/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 pool_lock#2 irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#17/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#17/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#17/1 &c->lock irq_context: 0 &type->s_umount_key#17/1 &____s->seqcount irq_context: 0 &type->s_umount_key#17/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#17/1 sb_lock irq_context: 0 &type->s_umount_key#17/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#17/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#15 irq_context: 0 &type->s_umount_key#17/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#17/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#17/1 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 &type->s_umount_key#17/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#15 irq_context: 0 kclist_lock irq_context: 0 kclist_lock resource_lock irq_context: 0 
kclist_lock fs_reclaim irq_context: 0 kclist_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kclist_lock pool_lock#2 irq_context: 0 kclist_lock &c->lock irq_context: 0 kclist_lock &____s->seqcount irq_context: 0 &type->s_umount_key#18/1 irq_context: 0 &type->s_umount_key#18/1 fs_reclaim irq_context: 0 &type->s_umount_key#18/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 pool_lock#2 irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#18/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#18/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#18/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#18/1 sb_lock irq_context: 0 &type->s_umount_key#18/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#18/1 &____s->seqcount irq_context: 0 &type->s_umount_key#18/1 &c->lock irq_context: 0 &type->s_umount_key#18/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#16 irq_context: 0 &type->s_umount_key#18/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#18/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#18/1 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 &type->s_umount_key#18/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#2 &____s->seqcount irq_context: 0 tomoyo_ss irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss pool_lock#2 irq_context: 0 tomoyo_ss tomoyo_policy_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 tomoyo_ss (console_sem).lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner_lock irq_context: 0 tomoyo_ss console_lock console_srcu console_owner irq_context: 0 tomoyo_ss console_lock console_srcu console_owner &port_lock_key irq_context: 0 tomoyo_ss console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#19/1 irq_context: 0 &type->s_umount_key#19/1 fs_reclaim irq_context: 0 &type->s_umount_key#19/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#19/1 pool_lock#2 irq_context: 0 &type->s_umount_key#19/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#19/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#19/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#19/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#19/1 sb_lock irq_context: 0 &type->s_umount_key#19/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#19/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#19/1 &sb->s_type->i_lock_key#17 irq_context: 0 &type->s_umount_key#19/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#19/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#19/1 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &type->s_umount_key#19/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#2 &obj_hash[i].lock irq_context: 0 &ns->lock irq_context: 0 &ns->lock &dentry->d_lock irq_context: 0 &ns->lock pin_fs_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 irq_context: 
0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 pool_lock#2 irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &dentry->d_lock &wq irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 mmu_notifier_invalidate_range_start irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &s->s_inode_list_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 tk_core.seq.seqcount irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &sb->s_type->i_lock_key#17 &dentry->d_lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &c->lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &pcp->lock &zone->lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &zone->lock irq_context: 0 &ns->lock &sb->s_type->i_mutex_key#6 &____s->seqcount irq_context: 0 &type->s_umount_key#20 irq_context: 0 &type->s_umount_key#20 sb_lock irq_context: 0 &type->s_umount_key#20 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 pnp_lock irq_context: 0 pnp_lock fs_reclaim irq_context: 0 pnp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pnp_lock pool_lock#2 irq_context: 0 &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 &device->physical_node_lock fs_reclaim irq_context: 0 &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &device->physical_node_lock pool_lock#2 irq_context: 0 &device->physical_node_lock lock irq_context: 0 &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &device->physical_node_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 fwnode_link_lock irq_context: 0 fwnode_link_lock &k->k_lock irq_context: 0 subsys mutex#19 irq_context: 0 &device->physical_node_lock &c->lock irq_context: 0 &device->physical_node_lock &____s->seqcount irq_context: softirq rcu_callback pcpu_lock irq_context: 0 subsys mutex#20 irq_context: 0 subsys mutex#20 &k->k_lock irq_context: 0 subsys mutex#21 irq_context: 0 subsys mutex#21 &k->k_lock irq_context: 0 gdp_mutex &c->lock irq_context: 0 gdp_mutex &____s->seqcount irq_context: 0 subsys mutex#22 irq_context: 0 subsys mutex#22 &k->k_lock irq_context: 0 tty_mutex irq_context: softirq led_lock irq_context: 0 subsys mutex#23 irq_context: 0 subsys mutex#23 &k->list_lock irq_context: 0 subsys mutex#23 &k->k_lock irq_context: 0 jiffies_seq.seqcount irq_context: 0 cpu_hotplug_lock wq_pool_mutex &xa->xa_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex kthread_create_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &x->wait irq_context: 0 cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex 
wq_pool_attach_mutex irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock wq_pool_mutex &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 netevent_notif_chain.lock irq_context: 0 clients_rwsem irq_context: 0 clients_rwsem fs_reclaim irq_context: 0 clients_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 clients_rwsem clients.xa_lock irq_context: 0 devices_rwsem irq_context: 0 clients_rwsem clients.xa_lock &c->lock irq_context: 0 clients_rwsem clients.xa_lock &pcp->lock &zone->lock irq_context: 0 clients_rwsem clients.xa_lock &zone->lock irq_context: 0 clients_rwsem clients.xa_lock &____s->seqcount irq_context: 0 clients_rwsem clients.xa_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 (blocking_lsm_notifier_chain).rwsem irq_context: 0 (inetaddr_chain).rwsem irq_context: 0 inet6addr_chain.lock irq_context: 0 buses_mutex irq_context: 0 offload_lock irq_context: 0 inetsw_lock irq_context: 0 (wq_completion)events_power_efficient irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: 0 ptype_lock irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem slab_mutex irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem slab_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem slab_mutex &c->lock irq_context: 0 pernet_ops_rwsem slab_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 tcp_ulp_list_lock irq_context: 0 xfrm_state_afinfo_lock irq_context: 0 xfrm_policy_afinfo_lock irq_context: 0 xfrm_input_afinfo_lock irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock irq_context: 0 rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock irq_context: 
0 rtnl_mutex krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex krc.lock &base->lock irq_context: 0 rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: hardirq rcu_read_lock &pool->lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_highpri irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &zone->lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &____s->seqcount irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) pool_lock#2 irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) krc.lock irq_context: 0 &hashinfo->lock irq_context: 0 tcp_cong_list_lock irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (rpc_pipefs_notifier_list).rwsem irq_context: 0 svc_xprt_class_lock irq_context: 0 xprt_list_lock irq_context: 0 xprt_list_lock (console_sem).lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner_lock irq_context: 0 xprt_list_lock console_lock console_srcu console_owner irq_context: 0 xprt_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 xprt_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) cache_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&cache_cleaner)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &zone->lock irq_context: 0 rcu_read_lock &____s->seqcount irq_context: 0 &(&priv->bus_notifier)->rwsem irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 hrtimer_bases.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pcibios_fwaddrmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock init_fs.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 mount_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock 
mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &sb->s_type->i_lock_key#2 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss 
rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_log_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key &wb->list_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &sb->s_type->i_lock_key#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &c->lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key/1 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sb_writers#2 &sb->s_type->i_mutex_key tomoyo_ss &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) quarantine_lock irq_context: 0 umhelper_sem irq_context: 0 umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_files.file_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->alloc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) 
cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &drv->dynids.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 &pool->lock/1 &base->lock irq_context: 0 &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 umh_sysctl_lock irq_context: 0 &mm->mmap_lock irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock fs_reclaim irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &sig->cred_guard_mutex irq_context: 0 &sig->cred_guard_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &fs->lock irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &zone->lock irq_context: 0 &sig->cred_guard_mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &____s->seqcount#3 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock 
&dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex pgd_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex key irq_context: 0 &sig->cred_guard_mutex pcpu_lock irq_context: 0 &sig->cred_guard_mutex percpu_counters_lock irq_context: 0 &tsk->futex_exit_mutex irq_context: 0 &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &p->alloc_lock &fs->lock irq_context: 0 &child->perf_event_mutex irq_context: 0 css_set_lock irq_context: 0 tasklist_lock irq_context: 0 tasklist_lock &pid->wait_pidfd irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit irq_context: 0 tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 tasklist_lock &obj_hash[i].lock irq_context: 0 low_water_lock irq_context: 0 low_water_lock (console_sem).lock irq_context: 0 low_water_lock console_lock console_srcu console_owner_lock irq_context: 0 low_water_lock console_lock console_srcu console_owner irq_context: 0 low_water_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 low_water_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &n->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up pool_lock#2 irq_context: 0 &x->wait#6 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex pool_lock#2 irq_context: 0 &cfs_rq->removed.lock irq_context: 0 pmus_lock &obj_hash[i].lock pool_lock irq_context: 0 vendor_module_lock irq_context: 0 vendor_module_lock slab_mutex irq_context: 0 vendor_module_lock slab_mutex fs_reclaim irq_context: 0 vendor_module_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock slab_mutex &c->lock irq_context: 0 vendor_module_lock slab_mutex &pcp->lock &zone->lock irq_context: 0 vendor_module_lock slab_mutex &zone->lock irq_context: 0 vendor_module_lock slab_mutex &____s->seqcount irq_context: 0 vendor_module_lock slab_mutex pool_lock#2 irq_context: 0 vendor_module_lock slab_mutex &n->list_lock irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex irq_context: 0 vendor_module_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock pcpu_alloc_mutex irq_context: 0 vendor_module_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 vendor_module_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vendor_module_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 vendor_module_lock &obj_hash[i].lock irq_context: 0 vendor_module_lock percpu_counters_lock irq_context: 0 vendor_module_lock fs_reclaim irq_context: 0 vendor_module_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vendor_module_lock pool_lock#2 irq_context: 0 vendor_module_lock shrinker_rwsem irq_context: 0 vendor_module_lock &____s->seqcount irq_context: 0 vendor_module_lock &pcp->lock &zone->lock irq_context: 0 vendor_module_lock &zone->lock irq_context: 0 vendor_module_lock cpu_hotplug_lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 vendor_module_lock cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vendor_module_lock timekeeper_lock irq_context: 0 vendor_module_lock timekeeper_lock pvclock_gtod_data 
irq_context: hardirq timekeeper_lock tk_core.seq.seqcount pvclock_gtod_data irq_context: softirq (&tcp_orphan_timer) irq_context: softirq (&tcp_orphan_timer) &obj_hash[i].lock irq_context: softirq (&tcp_orphan_timer) &base->lock irq_context: softirq (&tcp_orphan_timer) &base->lock &obj_hash[i].lock irq_context: softirq &(&cache_cleaner)->timer irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&cache_cleaner)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 misc_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 misc_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#9 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up bus_type_sem irq_context: 0 cpu_hotplug_lock cpuhp_state-up sysfs_symlink_target_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up dpm_list_mtx irq_context: 0 cpu_hotplug_lock cpuhp_state-up req_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up &x->wait#11 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 
&type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 &dentry->d_lock &wq irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#5 irq_context: 0 cpu_hotplug_lock cpuhp_state-up uevent_sock_mutex irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up running_helpers_waitq.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#24 &k->k_lock irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &zone->lock irq_context: 0 &mm->mmap_lock &____s->seqcount irq_context: 0 &mm->mmap_lock &c->lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex rcu_read_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up gdp_mutex &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#25 irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#25 &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &rq->__lock &cfs_rq->removed.lock irq_context: 0 crypto_alg_sem irq_context: 0 pm_qos_lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock irq_context: 0 subsys mutex#26 irq_context: 0 subsys mutex#27 irq_context: 0 subsys mutex#27 &k->list_lock irq_context: 0 subsys mutex#27 &k->k_lock irq_context: 0 &type->s_umount_key#25/1 &dd->lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 subsys mutex#28 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &obj_hash[i].lock pool_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 
pidmap_lock pool_lock#2 irq_context: 0 trace_event_sem trace_event_sem.wait_lock irq_context: 0 trace_event_sem &rq->__lock irq_context: 0 trace_event_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) irq_context: 0 (wq_completion)events (work_completion)(&p->wq) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) pool_lock#2 irq_context: softirq rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &obj_hash[i].lock pool_lock irq_context: softirq (&timer.timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pool_lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&group->avgs_work)->timer irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq mm/memcontrol.c:589 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 bit_wait_table + i irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq bit_wait_table + i irq_context: softirq bit_wait_table + i &p->pi_lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) irq_context: 0 &type->s_umount_key#25/1 &wq->mutex &x->wait#10 irq_context: 0 &type->s_umount_key#25/1 wq_mayday_lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 
wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex &rq->__lock irq_context: 0 &type->s_umount_key#25/1 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#25/1 &sbi->old_work_lock irq_context: 0 &type->s_umount_key#25/1 (work_completion)(&(&sbi->old_work)->work) irq_context: 0 &type->s_umount_key#25/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#25/1 sb_lock irq_context: 0 &xa->xa_lock#4 irq_context: 0 sb_lock &obj_hash[i].lock irq_context: 0 sb_lock pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 irq_context: softirq &(&kfence_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pool_lock#2 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#26/1 sb_lock irq_context: 0 &type->s_umount_key#26/1 irq_context: 0 pmus_lock fs_reclaim irq_context: 0 pmus_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pmus_lock &k->list_lock irq_context: 0 pmus_lock lock irq_context: 0 pmus_lock lock kernfs_idr_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pmus_lock uevent_sock_mutex irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pmus_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pmus_lock running_helpers_waitq.lock irq_context: 0 pmus_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u64.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&sub_info->work) lock pidmap_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &____s->seqcount irq_context: 0 pmus_lock &x->wait#9 irq_context: 0 pmus_lock bus_type_sem irq_context: 0 pmus_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 pmus_lock &c->lock irq_context: 0 pmus_lock &pcp->lock &zone->lock irq_context: 0 pmus_lock &zone->lock irq_context: 0 pmus_lock &____s->seqcount irq_context: 0 pmus_lock sysfs_symlink_target_lock irq_context: 0 pmus_lock &k->k_lock irq_context: 0 pmus_lock &root->kernfs_rwsem irq_context: 0 pmus_lock &dev->power.lock irq_context: 0 pmus_lock dpm_list_mtx irq_context: 0 pmus_lock &dev->mutex &k->list_lock irq_context: 0 pmus_lock &dev->mutex &k->k_lock irq_context: 0 pmus_lock &dev->mutex &dev->power.lock irq_context: 0 pmus_lock subsys mutex#29 irq_context: 0 &type->s_umount_key#26/1 fs_reclaim irq_context: 0 &type->s_umount_key#26/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 &c->lock irq_context: 0 &type->s_umount_key#26/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#26/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#26/1 &____s->seqcount irq_context: 0 &type->s_umount_key#26/1 pool_lock#2 irq_context: 0 &type->s_umount_key#26/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#26/1 &zone->lock irq_context: 0 &type->s_umount_key#26/1 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#26/1 lock#4 irq_context: 0 &type->s_umount_key#26/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#26/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#26/1 &dd->lock irq_context: 0 &type->s_umount_key#26/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 key_user_lock irq_context: 0 key_serial_lock irq_context: 0 key_construction_mutex irq_context: 0 &type->lock_class irq_context: 0 &type->lock_class keyring_serialise_link_lock irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->lock_class keyring_serialise_link_lock &c->lock irq_context: 0 &type->lock_class keyring_serialise_link_lock &____s->seqcount irq_context: 0 &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 keyring_serialise_link_lock irq_context: 0 &pgdat->kswapd_lock fs_reclaim irq_context: 0 &pgdat->kswapd_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pgdat->kswapd_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &c->lock irq_context: 0 &pgdat->kswapd_lock kthread_create_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock &zone->lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &zone->lock irq_context: 0 &pgdat->kswapd_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &____s->seqcount irq_context: 0 
&pgdat->kswapd_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pgdat->kswapd_lock &rq->__lock irq_context: 0 &pgdat->kswapd_lock &x->wait irq_context: 0 &pgdat->kswapd_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pgdat->kswapd_lock &obj_hash[i].lock irq_context: 0 &pgdat->kswapd_wait irq_context: 0 list_lrus_mutex irq_context: 0 drivers_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &c->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &____s->seqcount irq_context: 0 damon_dbgfs_lock irq_context: 0 damon_dbgfs_lock fs_reclaim irq_context: 0 damon_dbgfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock pool_lock#2 irq_context: 0 damon_dbgfs_lock tk_core.seq.seqcount irq_context: 0 damon_dbgfs_lock damon_ops_lock irq_context: 0 damon_dbgfs_lock pin_fs_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 damon_dbgfs_lock &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 misc_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#21/1 irq_context: 0 &type->s_umount_key#21/1 fs_reclaim irq_context: 0 &type->s_umount_key#21/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 pool_lock#2 irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#21/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#21/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#21/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#21/1 sb_lock irq_context: 0 &type->s_umount_key#21/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#21/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#18 irq_context: 0 &type->s_umount_key#21/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#21/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#21/1 &sb->s_type->i_lock_key#18 &dentry->d_lock irq_context: 0 &type->s_umount_key#21/1 &dentry->d_lock irq_context: 0 dq_list_lock irq_context: 0 &type->s_umount_key#22/1 irq_context: 0 &type->s_umount_key#22/1 fs_reclaim irq_context: 0 &type->s_umount_key#22/1 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 pool_lock#2 irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#22/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#22/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#22/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#22/1 sb_lock irq_context: 0 &type->s_umount_key#22/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#22/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#19 irq_context: 0 &type->s_umount_key#22/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#22/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#22/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &type->s_umount_key#22/1 &dentry->d_lock irq_context: 0 configfs_subsystem_mutex irq_context: 0 &sb->s_type->i_mutex_key#7/1 irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 irq_context: 0 misc_mtx &obj_hash[i].lock pool_lock irq_context: 0 misc_mtx &cfs_rq->removed.lock irq_context: 0 misc_mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &pcp->lock &zone->lock &____s->seqcount irq_context: 0 console_owner_lock irq_context: 0 ecryptfs_daemon_hash_mux irq_context: 0 &ecryptfs_kthread_ctl.wait irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim irq_context: 0 ecryptfs_daemon_hash_mux fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ecryptfs_daemon_hash_mux pool_lock#2 irq_context: 0 ecryptfs_msg_ctx_lists_mux irq_context: 0 ecryptfs_msg_ctx_lists_mux &ecryptfs_msg_ctx_arr[i].mux irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem running_helpers_waitq.lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 nfs_version_lock irq_context: 0 key_types_sem irq_context: 0 key_types_sem (console_sem).lock irq_context: 0 key_types_sem 
console_lock console_srcu console_owner_lock irq_context: 0 key_types_sem console_lock console_srcu console_owner irq_context: 0 key_types_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 key_types_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pnfs_spinlock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem krc.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&(&krcp->page_cache_work)->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 nls_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &c->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &zone->lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 jffs2_compressor_list_lock irq_context: 0 tasklist_lock &c->lock irq_context: 0 next_tag_value_lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq drivers/char/random.c:251 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 log_redrive_lock irq_context: 0 &TxAnchor.LazyLock irq_context: 0 &TxAnchor.LazyLock jfs_commit_thread_wait.lock irq_context: 0 jfsTxnLock irq_context: 0 slab_mutex batched_entropy_u8.lock irq_context: 0 slab_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 slab_mutex kfence_freelist_lock irq_context: 0 ocfs2_stack_lock irq_context: 0 ocfs2_stack_lock (console_sem).lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner_lock irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 ocfs2_stack_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 misc_mtx rcu_read_lock pool_lock#2 irq_context: 0 fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 o2hb_callback_sem irq_context: 0 o2net_handler_lock irq_context: 0 &type->s_umount_key#23/1 irq_context: 0 &type->s_umount_key#23/1 fs_reclaim irq_context: 0 &type->s_umount_key#23/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#23/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#23/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#23/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#23/1 sb_lock irq_context: 0 &type->s_umount_key#23/1 sb_lock 
unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#23/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#23/1 &zone->lock irq_context: 0 &type->s_umount_key#23/1 &____s->seqcount irq_context: 0 &type->s_umount_key#23/1 &c->lock irq_context: 0 &type->s_umount_key#23/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#20 irq_context: 0 &type->s_umount_key#23/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#23/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#23/1 &sb->s_type->i_lock_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#23/1 &dentry->d_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &zone->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 alg_types_sem irq_context: 0 alg_types_sem fs_reclaim irq_context: 0 alg_types_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 alg_types_sem pool_lock#2 irq_context: 0 dma_list_mutex irq_context: 0 asymmetric_key_parsers_sem irq_context: 0 asymmetric_key_parsers_sem (console_sem).lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner_lock irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 asymmetric_key_parsers_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 blkcg_pol_register_mutex irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim irq_context: 0 blkcg_pol_register_mutex cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 blkcg_pol_register_mutex cgroup_mutex pool_lock#2 irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex lock kernfs_idr_lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &c->lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &pcp->lock &zone->lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &zone->lock irq_context: 0 blkcg_pol_register_mutex cgroup_mutex &____s->seqcount irq_context: 0 elv_list_lock irq_context: 0 crc_t10dif_mutex irq_context: 0 crc_t10dif_mutex crypto_alg_sem irq_context: 0 crc_t10dif_mutex fs_reclaim irq_context: 0 crc_t10dif_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 crc_t10dif_mutex pool_lock#2 irq_context: 0 crc64_rocksoft_mutex irq_context: 0 crc64_rocksoft_mutex crypto_alg_sem irq_context: 0 crc64_rocksoft_mutex fs_reclaim irq_context: 0 crc64_rocksoft_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crc64_rocksoft_mutex pool_lock#2 irq_context: 0 ts_mod_lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &sig->cred_guard_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex device_links_lock irq_context: 0 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex lock irq_context: 0 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex gdp_mutex irq_context: 0 &dev->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex gdp_mutex lock irq_context: 0 &dev->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex bus_type_sem irq_context: 0 &dev->mutex &c->lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &zone->lock irq_context: 0 &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex &rq->__lock irq_context: 0 &dev->mutex subsys mutex#30 irq_context: 0 &dev->mutex subsys mutex#30 &k->k_lock irq_context: 0 &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex input_mutex irq_context: 0 &dev->mutex input_mutex fs_reclaim irq_context: 0 &dev->mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex input_mutex pool_lock#2 
irq_context: 0 &dev->mutex input_mutex &dev->mutex#2 irq_context: 0 &dev->mutex input_mutex input_devices_poll_wait.lock irq_context: 0 &dev->mutex semaphore->lock irq_context: 0 &dev->mutex *(&acpi_gbl_hardware_lock) irq_context: 0 &dev->mutex wakeup_ida.xa_lock irq_context: 0 &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex events_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex wakeup_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex &x->wait#3 irq_context: 0 &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex wakeup_srcu_srcu_usage.lock irq_context: 0 &dev->mutex (&ws->timer) irq_context: 0 &dev->mutex &base->lock irq_context: 0 &dev->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex kernfs_idr_lock irq_context: 0 &dev->mutex &ws->lock irq_context: 0 &dev->mutex deleted_ws.lock irq_context: 0 &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex input_mutex &c->lock irq_context: 0 &dev->mutex input_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex input_mutex 
&zone->lock irq_context: 0 &dev->mutex input_mutex &____s->seqcount irq_context: 0 &dev->mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 register_count_mutex irq_context: 0 register_count_mutex &k->list_lock irq_context: 0 register_count_mutex fs_reclaim irq_context: 0 register_count_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_count_mutex pool_lock#2 irq_context: 0 register_count_mutex lock irq_context: 0 register_count_mutex lock kernfs_idr_lock irq_context: 0 register_count_mutex &root->kernfs_rwsem irq_context: 0 register_count_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_count_mutex &k->k_lock irq_context: 0 register_count_mutex &c->lock irq_context: 0 register_count_mutex &pcp->lock &zone->lock irq_context: 0 register_count_mutex &zone->lock irq_context: 0 register_count_mutex &____s->seqcount irq_context: 0 register_count_mutex uevent_sock_mutex irq_context: 0 register_count_mutex &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_count_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_count_mutex running_helpers_waitq.lock irq_context: 0 register_count_mutex &rq->__lock irq_context: 0 (cpufreq_policy_notifier_list).rwsem irq_context: 0 &dev->mutex cpu_add_remove_lock irq_context: 0 &dev->mutex cpuidle_driver_lock irq_context: 0 &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex cpuidle_lock irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim irq_context: 0 &dev->mutex cpuidle_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpuidle_lock pool_lock#2 irq_context: 0 &dev->mutex cpuidle_lock lock irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex cpuidle_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex cpuidle_lock &c->lock irq_context: 0 &dev->mutex cpuidle_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex cpuidle_lock &zone->lock irq_context: 0 &dev->mutex cpuidle_lock &____s->seqcount irq_context: 0 &dev->mutex thermal_cdev_ida.xa_lock irq_context: 0 &dev->mutex cpufreq_driver_lock irq_context: 0 &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex batched_entropy_u8.lock crngs.lock irq_context: 0 &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex subsys mutex#31 irq_context: 0 &dev->mutex subsys mutex#31 &k->k_lock irq_context: 0 &dev->mutex thermal_list_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 
&dev->mutex tick_broadcast_lock irq_context: 0 &dev->mutex cpuidle_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &x->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (x86_mce_decoder_chain).rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) running_helpers_waitq.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &drv->dynids.lock irq_context: 0 &dev->mutex pci_config_lock irq_context: 0 &dev->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim irq_context: 0 &dev->mutex acpi_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex acpi_link_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock semaphore->lock irq_context: 0 &dev->mutex acpi_link_lock &obj_hash[i].lock irq_context: 0 &dev->mutex acpi_link_lock &____s->seqcount irq_context: 0 &dev->mutex acpi_link_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock &zone->lock irq_context: 0 &dev->mutex acpi_link_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex acpi_link_lock *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex acpi_link_lock &c->lock irq_context: 0 &dev->mutex acpi_link_lock pci_config_lock irq_context: 0 &dev->mutex acpi_link_lock (console_sem).lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex acpi_link_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex acpi_ioapic_lock irq_context: 0 &dev->mutex acpi_ioapic_lock ioapic_mutex irq_context: 0 &dev->mutex resource_lock 
irq_context: 0 &dev->mutex virtio_index_ida.xa_lock irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#32 irq_context: 0 &dev->mutex fwnode_link_lock &k->k_lock irq_context: 0 &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &md->mutex irq_context: 0 &dev->mutex &md->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex irq_domain_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex resource_lock irq_context: 0 &dev->mutex memtype_lock irq_context: 0 &dev->mutex free_vmap_area_lock irq_context: 0 &dev->mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &dev->mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &dev->mutex vmap_area_lock irq_context: 0 &dev->mutex &md->mutex pci_config_lock irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#5 irq_context: 0 &dev->mutex &md->mutex &xa->xa_lock#5 pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim irq_context: 0 &dev->mutex &md->mutex &domain->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &md->mutex &domain->mutex pool_lock#2 irq_context: 0 &dev->mutex &md->mutex &domain->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &md->mutex vector_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex lock irq_context: 0 &dev->mutex &md->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &md->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock 
&____s->seqcount irq_context: 0 &dev->mutex &md->mutex &domain->mutex &c->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &zone->lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &____s->seqcount irq_context: 0 &dev->mutex &desc->request_mutex irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &dev->mutex register_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock fs_reclaim irq_context: 0 &dev->mutex register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_lock pool_lock#2 irq_context: 0 &dev->mutex register_lock proc_inum_ida.xa_lock irq_context: 0 &dev->mutex register_lock proc_subdir_lock irq_context: 0 &dev->mutex register_lock &____s->seqcount irq_context: 0 &dev->mutex register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_lock &zone->lock irq_context: 0 &dev->mutex register_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex register_lock &c->lock irq_context: 0 &dev->mutex &irq_desc_lock_class irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex proc_subdir_lock irq_context: 0 &dev->mutex &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &dev->vqs_list_lock irq_context: 0 &dev->mutex &vp_dev->lock irq_context: 0 &dev->mutex cpu_hotplug_lock irq_context: 0 &dev->mutex &s->s_inode_list_lock irq_context: 0 &dev->mutex (oom_notify_list).rwsem irq_context: 0 &dev->mutex &dev->config_lock irq_context: 0 vdpa_dev_lock irq_context: 0 subsys mutex#33 irq_context: 0 subsys mutex#33 &k->k_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &dentry->d_lock/1 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fs_reclaim 
pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pcpu_alloc_mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sig->cred_guard_mutex pool_lock irq_context: 0 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: softirq rcu_callback quarantine_lock irq_context: 0 &child->perf_event_mutex &rq->__lock irq_context: 0 rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock irq_context: softirq rcu_callback &meta->lock irq_context: softirq rcu_callback kfence_freelist_lock irq_context: 0 &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_node_0 irq_context: hardirq &vb->stop_update_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vb->stop_update_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_freezable irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &s->s_inode_list_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 gdp_mutex &rq->__lock irq_context: 0 gdp_mutex &cfs_rq->removed.lock irq_context: 0 gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 
fill_pool_map-wait-type-override &zone->lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock lock#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 pcpu_alloc_mutex &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex pool_lock#2 irq_context: 0 pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex.wait_lock irq_context: 0 &x->wait#11 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex quarantine_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 quarantine_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock delayed_uprobe_lock.wait_lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock.wait_lock irq_context: 0 &sig->cred_guard_mutex delayed_uprobe_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback fill_pool_map-wait-type-override pool_lock irq_context: 0 &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &rq->__lock irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex pnp_lock irq_context: 0 &dev->mutex serial_mutex irq_context: 0 &dev->mutex serial_mutex gpio_lookup_lock irq_context: 0 &dev->mutex serial_mutex port_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &x->wait#9 irq_context: 0 &dev->mutex serial_mutex port_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex bus_type_sem irq_context: 0 &dev->mutex serial_mutex port_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex dpm_list_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 
irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex serial_mutex port_mutex subsys mutex#34 irq_context: 0 &dev->mutex serial_mutex port_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex 
serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)pm irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex resource_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex (console_sem).lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex ctrl_ida.xa_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#9 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex bus_type_sem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dpm_list_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex uevent_sock_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 pool_lock#2 
irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#35 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex semaphore->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex *(&acpi_gbl_reference_count_lock) irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &k->k_lock klist_remove_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex deferred_probe_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex device_links_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 
&dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex req_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &x->wait#11 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex subsys mutex#21 &k->k_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex chrdevs_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq allocation_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex serial_mutex port_mutex &port->mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 misc_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rng_index_ida.xa_lock irq_context: hardirq &x->wait#12 irq_context: 0 &dev->mutex rng_mutex irq_context: 0 &dev->mutex rng_mutex &x->wait#13 irq_context: 0 &dev->mutex rng_mutex fs_reclaim irq_context: 0 &dev->mutex rng_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rng_mutex pool_lock#2 irq_context: 0 &dev->mutex rng_mutex kthread_create_lock irq_context: 0 &dev->mutex rng_mutex &p->pi_lock irq_context: 0 &dev->mutex rng_mutex &x->wait irq_context: 0 &dev->mutex rng_mutex &rq->__lock irq_context: 0 
&dev->mutex rng_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rng_mutex &obj_hash[i].lock irq_context: 0 rng_mutex irq_context: 0 reading_mutex irq_context: 0 &dev->mutex reading_mutex irq_context: 0 &dev->mutex reading_mutex reading_mutex.wait_lock irq_context: 0 &dev->mutex input_pool.lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 &root->kernfs_rwsem &zone->lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 klist_remove_lock irq_context: 0 &k->k_lock klist_remove_lock irq_context: 0 kernfs_idr_lock irq_context: 0 misc_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &dev->devres_lock irq_context: 0 &dev->managed.lock irq_context: 0 &type->s_umount_key#24/1 irq_context: 0 &type->s_umount_key#24/1 fs_reclaim irq_context: 0 &type->s_umount_key#24/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 pool_lock#2 irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#24/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#24/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#24/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#24/1 sb_lock irq_context: 0 &type->s_umount_key#24/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#24/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#24/1 &sb->s_type->i_lock_key#21 irq_context: 0 &type->s_umount_key#24/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#24/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#24/1 &sb->s_type->i_lock_key#21 &dentry->d_lock irq_context: 0 &type->s_umount_key#24/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#21 irq_context: 0 lock drm_minor_lock irq_context: 0 lock drm_minor_lock pool_lock#2 irq_context: 0 stack_depot_init_mutex irq_context: 0 &dev->debugfs_mutex irq_context: 0 subsys mutex#36 irq_context: 0 subsys mutex#36 &k->k_lock irq_context: 0 drm_minor_lock irq_context: 0 &dev->debugfs_mutex &rq->__lock irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock irq_context: 0 &dev->mode_config.idr_mutex irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim irq_context: hardirq &rq->__lock &rt_b->rt_runtime_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &dev->mode_config.idr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: hardirq &rt_b->rt_runtime_lock irq_context: 0 &dev->mode_config.idr_mutex pool_lock#2 irq_context: hardirq &rt_b->rt_runtime_lock tk_core.seq.seqcount irq_context: hardirq &rt_rq->rt_runtime_lock irq_context: 0 (worker)->lock irq_context: 0 crtc_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire irq_context: 0 crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_acquire reservation_ww_class_mutex irq_context: 0 &dev->mode_config.blob_lock irq_context: 0 &xa->xa_lock#6 irq_context: 0 &xa->xa_lock#7 irq_context: 0 
&dev->mode_config.connector_list_lock irq_context: 0 &dev->vbl_lock irq_context: 0 drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 drm_connector_list_iter fs_reclaim irq_context: 0 drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &x->wait#9 irq_context: 0 drm_connector_list_iter &connector->mutex &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->list_lock irq_context: 0 drm_connector_list_iter &connector->mutex &pcp->lock &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &zone->lock irq_context: 0 drm_connector_list_iter &connector->mutex &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex lock irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex bus_type_sem irq_context: 0 drm_connector_list_iter &connector->mutex sysfs_symlink_target_lock irq_context: 0 drm_connector_list_iter &connector->mutex &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &root->kernfs_rwsem irq_context: 0 drm_connector_list_iter &connector->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &dev->power.lock irq_context: 0 drm_connector_list_iter &connector->mutex dpm_list_mtx irq_context: 0 drm_connector_list_iter &connector->mutex uevent_sock_mutex irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 drm_connector_list_iter &connector->mutex running_helpers_waitq.lock irq_context: 0 drm_connector_list_iter &connector->mutex &rq->__lock irq_context: 0 drm_connector_list_iter &connector->mutex &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#36 irq_context: 0 drm_connector_list_iter &connector->mutex subsys mutex#36 &k->k_lock irq_context: 0 drm_connector_list_iter &connector->mutex pin_fs_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 drm_connector_list_iter &connector->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 drm_connector_list_iter &connector->mutex &dev->mode_config.idr_mutex irq_context: 0 drm_connector_list_iter &connector->mutex connector_list_lock irq_context: 0 &dev->filelist_mutex irq_context: 0 &dev->clientlist_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock drm_connector_list_iter pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock 
&client->modeset_mutex &dev->mode_config.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex &dev->mode_config.mutex &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &client->modeset_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &sbinfo->stat_lock irq_context: 0 &dev->clientlist_mutex &helper->lock mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex &helper->lock &sb->s_type->i_lock_key irq_context: 0 &dev->clientlist_mutex &helper->lock &s->s_inode_list_lock irq_context: 0 &dev->clientlist_mutex &helper->lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock batched_entropy_u32.lock irq_context: 0 &dev->clientlist_mutex &helper->lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &mgr->vm_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &mgr->vm_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock &file_private->table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->object_name_lock lock &file_private->table_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->mode_config.idr_mutex irq_context: 0 &dev->clientlist_mutex &helper->lock &dev->mode_config.fb_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file->fbs_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &prime_fpriv->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &node->vm_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex &helper->lock &file_private->table_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex &helper->lock free_vmap_area_lock irq_context: 0 &dev->clientlist_mutex &helper->lock vmap_area_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &zone->lock irq_context: 0 &dev->clientlist_mutex &helper->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock init_mm.page_table_lock irq_context: 0 &dev->clientlist_mutex &helper->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex &helper->lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock irq_context: 0 &dev->clientlist_mutex registration_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex 
registration_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock &x->wait#9 irq_context: 0 &dev->clientlist_mutex registration_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock lock irq_context: 0 &dev->clientlist_mutex registration_lock lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock bus_type_sem irq_context: 0 &dev->clientlist_mutex registration_lock sysfs_symlink_target_lock irq_context: 0 &dev->clientlist_mutex registration_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock &dev->power.lock irq_context: 0 &dev->clientlist_mutex registration_lock dpm_list_mtx irq_context: 0 &dev->clientlist_mutex registration_lock req_lock irq_context: 0 &dev->clientlist_mutex registration_lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock &x->wait#11 irq_context: 0 &dev->clientlist_mutex registration_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->clientlist_mutex registration_lock uevent_sock_mutex irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock running_helpers_waitq.lock irq_context: 0 &dev->clientlist_mutex registration_lock &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock subsys mutex#11 irq_context: 0 &dev->clientlist_mutex registration_lock subsys mutex#11 &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex irq_context: 0 
&dev->clientlist_mutex registration_lock vt_switch_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock vt_switch_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock (console_sem).lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &fb_info->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &base->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &base->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &x->wait#9 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock gdp_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock gdp_mutex &k->list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock lock kernfs_idr_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock bus_type_sem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock sysfs_symlink_target_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &root->kernfs_rwsem irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &dev->power.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock dpm_list_mtx irq_context: 0 &dev->clientlist_mutex registration_lock console_lock uevent_sock_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex 
registration_lock console_lock running_helpers_waitq.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock subsys mutex#5 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock subsys mutex#5 &k->k_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock vga_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.idr_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->mode_config.blob_lock irq_context: 0 &dev->clientlist_mutex 
registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter &dev->mode_config.connector_list_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &crtc->commit_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#8 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &info->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#8 &c->lock irq_context: 0 &dev->clientlist_mutex 
registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#8 &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#8 &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#8 &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock lock#4 &lruvec->lru_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &shmem->pages_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock free_vmap_area_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock vmap_area_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock init_mm.page_table_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire 
crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex reservation_ww_class_mutex &shmem->vmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &dev->vbl_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &x->wait#14 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work) irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->damage_lock 
irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->damage_lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &____s->seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex drm_connector_list_iter pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 
0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vbl_lock &dev->vblank_time_lock tk_core.seq.seqcount irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &vkms_out->lock &dev->event_lock &dev->vblank_time_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &base->lock &obj_hash[i].lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: hardirq &vkms_out->lock irq_context: hardirq &vkms_out->lock &dev->event_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &dev->vblank_time_lock &(&vblank->seqlock)->lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &vblank->queue irq_context: hardirq &vkms_out->lock &dev->event_lock &____s->seqcount#6 irq_context: hardirq &vkms_out->lock &dev->event_lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock irq_context: hardirq &vkms_out->lock &dev->event_lock &base->lock &obj_hash[i].lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &vkms_out->lock &dev->event_lock &x->wait#14 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: hardirq &vkms_out->lock &dev->event_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (&timer.timer) irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex (work_completion)(&vkms_state->composer_work)#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &pcp->lock &zone->lock irq_context: 0 &dev->clientlist_mutex 
registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex &zone->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire crtc_ww_class_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &lock->wait_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &p->pi_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex irq_context: 0 (wq_completion)events (work_completion)(&helper->damage_work) &helper->lock reservation_ww_class_mutex &shmem->vmap_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock batched_entropy_u8.lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock kfence_freelist_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock vt_event_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &meta->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock (console_sem).lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_owner_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock &helper->lock &dev->master_mutex &client->modeset_mutex crtc_ww_class_acquire &c->lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->clientlist_mutex registration_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->clientlist_mutex (console_sem).lock irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->clientlist_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->clientlist_mutex kernel_fb_helper_lock irq_context: 0 &dev->queue_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex lock kernfs_idr_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up lock kernfs_idr_lock pool_lock#2 irq_context: 0 blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 bio_slab_lock bio_slabs.xa_lock &zone->lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 bio_slab_lock bio_slabs.xa_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 bio_slab_lock bio_slabs.xa_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex irq_context: 0 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &n->list_lock irq_context: 0 &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) mmu_notifier_invalidate_range_start irq_context: 0 lock &q->queue_lock irq_context: 0 lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) blk_queue_ida.xa_lock irq_context: 0 &q->queue_lock irq_context: 0 &q->queue_lock pool_lock#2 irq_context: 0 &q->queue_lock pcpu_lock irq_context: 0 &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock irq_context: 0 &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &n->list_lock irq_context: 0 &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &xa->xa_lock#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &set->tag_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) bio_slab_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pool_lock#2 irq_context: 0 subsys mutex#37 irq_context: 0 subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 dev_hotplug_mutex irq_context: 0 dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->sysfs_dir_lock irq_context: 0 &q->sysfs_dir_lock fs_reclaim irq_context: 0 &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock lock irq_context: 0 &q->sysfs_dir_lock lock 
kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &x->wait#9 irq_context: 0 &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &q->sysfs_dir_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 percpu_ref_switch_lock irq_context: 0 subsys mutex#38 irq_context: 0 subsys mutex#38 &k->k_lock irq_context: 0 cgwb_lock irq_context: 0 bdi_lock irq_context: 0 inode_hash_lock irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 bdev_lock irq_context: 0 &disk->open_mutex irq_context: 0 &disk->open_mutex fs_reclaim irq_context: 0 &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex pool_lock#2 irq_context: 0 &disk->open_mutex free_vmap_area_lock irq_context: 0 &disk->open_mutex vmap_area_lock irq_context: 0 &disk->open_mutex &____s->seqcount irq_context: 0 &disk->open_mutex init_mm.page_table_lock irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 &disk->open_mutex &zone->lock irq_context: 0 &disk->open_mutex &xa->xa_lock#8 irq_context: 0 &disk->open_mutex lock#4 irq_context: 0 &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex &c->lock irq_context: 0 &disk->open_mutex &mapping->private_lock irq_context: 0 &disk->open_mutex tk_core.seq.seqcount irq_context: 0 &disk->open_mutex &ret->b_uptodate_lock irq_context: 0 &disk->open_mutex &obj_hash[i].lock irq_context: 0 &disk->open_mutex &xa->xa_lock#8 pool_lock#2 irq_context: 0 &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 &disk->open_mutex purge_vmap_area_lock irq_context: 0 
&disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &disk->open_mutex lock#4 &lruvec->lru_lock irq_context: 0 &disk->open_mutex lock#5 irq_context: 0 &disk->open_mutex &lruvec->lru_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) major_names_lock major_names_spinlock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) floppy_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) rtc_lock irq_context: softirq rcu_callback percpu_ref_switch_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &disk->open_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) register_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &irq_desc_lock_class irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) resource_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &base->lock &obj_hash[i].lock irq_context: 0 &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &q->queue_lock &c->lock irq_context: 0 &q->queue_lock &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &q->queue_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 &q->sysfs_dir_lock &rq->__lock irq_context: 0 &disk->open_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &c->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 &disk->open_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) command_done.lock irq_context: softirq rcu_callback rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock pool_lock#2 irq_context: softirq rcu_callback percpu_counters_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock pool_lock#2 irq_context: 0 loop_ctl_mutex irq_context: 0 loop_ctl_mutex fs_reclaim irq_context: 0 loop_ctl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 loop_ctl_mutex pool_lock#2 irq_context: 0 &q->sysfs_lock irq_context: 0 &q->sysfs_lock &q->unused_hctx_lock 
irq_context: 0 &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_lock cpu_hotplug_lock irq_context: 0 &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 &q->sysfs_lock fs_reclaim irq_context: 0 &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_lock &xa->xa_lock#10 irq_context: 0 &set->tag_list_lock irq_context: 0 &q->mq_freeze_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 &q->queue_lock &pcp->lock &zone->lock irq_context: 0 &q->queue_lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex percpu_ref_switch_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex pin_fs_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex 
&sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_lock &n->list_lock irq_context: 0 &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_lock &zone->lock irq_context: 0 &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 &q->sysfs_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &q->sysfs_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 &q->queue_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_lock &rq->__lock irq_context: 0 &q->sysfs_dir_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: softirq &(&ops->cursor_work)->timer irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ops->cursor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) (console_sem).lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock &helper->damage_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) console_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&ops->cursor_work)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&ops->cursor_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 nbd_index_mutex irq_context: 0 nbd_index_mutex fs_reclaim irq_context: 0 nbd_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nbd_index_mutex pool_lock#2 irq_context: 0 set->srcu irq_context: 0 (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (work_completion)(&(&hctx->run_work)->work) irq_context: 0 &q->debugfs_mutex irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex set->srcu irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &tsk->futex_exit_mutex &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &n->list_lock irq_context: 0 &q->sysfs_dir_lock &q->sysfs_lock &n->list_lock &c->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &q->sysfs_dir_lock lock kernfs_idr_lock &zone->lock irq_context: 0 zram_index_mutex irq_context: 0 zram_index_mutex fs_reclaim irq_context: 0 zram_index_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex pool_lock#2 irq_context: 0 zram_index_mutex blk_queue_ida.xa_lock irq_context: 0 zram_index_mutex &obj_hash[i].lock irq_context: 0 zram_index_mutex pcpu_alloc_mutex irq_context: 0 zram_index_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 zram_index_mutex bio_slab_lock irq_context: 0 zram_index_mutex &c->lock irq_context: 0 zram_index_mutex &____s->seqcount irq_context: 0 zram_index_mutex &obj_hash[i].lock pool_lock irq_context: 0 zram_index_mutex percpu_counters_lock irq_context: 0 zram_index_mutex mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex &s->s_inode_list_lock irq_context: 0 zram_index_mutex &xa->xa_lock#9 irq_context: 0 zram_index_mutex lock irq_context: 0 zram_index_mutex lock &q->queue_lock irq_context: 0 zram_index_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 zram_index_mutex &q->queue_lock irq_context: 0 zram_index_mutex &q->queue_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->queue_lock pcpu_lock irq_context: 0 zram_index_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 zram_index_mutex &q->queue_lock percpu_counters_lock irq_context: 0 zram_index_mutex &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 zram_index_mutex &q->queue_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 zram_index_mutex &q->queue_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 zram_index_mutex &x->wait#9 irq_context: 0 
zram_index_mutex &bdev->bd_size_lock irq_context: 0 zram_index_mutex &k->list_lock irq_context: 0 zram_index_mutex gdp_mutex irq_context: 0 zram_index_mutex gdp_mutex &k->list_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex bus_type_sem irq_context: 0 zram_index_mutex sysfs_symlink_target_lock irq_context: 0 zram_index_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 zram_index_mutex &root->kernfs_rwsem irq_context: 0 zram_index_mutex &dev->power.lock irq_context: 0 zram_index_mutex dpm_list_mtx irq_context: 0 zram_index_mutex req_lock irq_context: 0 zram_index_mutex &p->pi_lock irq_context: 0 zram_index_mutex &p->pi_lock &rq->__lock irq_context: 0 zram_index_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex &rq->__lock irq_context: 0 zram_index_mutex &x->wait#11 irq_context: 0 zram_index_mutex subsys mutex#37 irq_context: 0 zram_index_mutex subsys mutex#37 &k->k_lock irq_context: 0 zram_index_mutex dev_hotplug_mutex irq_context: 0 zram_index_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &q->sysfs_dir_lock 
&q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex percpu_ref_switch_lock irq_context: 0 zram_index_mutex &pcp->lock &zone->lock irq_context: 0 zram_index_mutex &zone->lock irq_context: 0 zram_index_mutex uevent_sock_mutex irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 zram_index_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 zram_index_mutex running_helpers_waitq.lock irq_context: 0 zram_index_mutex subsys mutex#38 irq_context: 0 zram_index_mutex subsys mutex#38 &k->k_lock irq_context: 0 zram_index_mutex cgwb_lock irq_context: 0 zram_index_mutex pin_fs_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 zram_index_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 zram_index_mutex bdi_lock irq_context: 0 zram_index_mutex inode_hash_lock irq_context: 0 zram_index_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 zram_index_mutex (console_sem).lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner_lock irq_context: 0 zram_index_mutex console_lock console_srcu console_owner irq_context: 0 zram_index_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 zram_index_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 gdp_mutex &pcp->lock &zone->lock irq_context: 0 gdp_mutex &zone->lock irq_context: 0 subsys mutex#39 irq_context: 0 subsys mutex#39 &k->k_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]#2 
configfs_dirent_lock irq_context: 0 &q->sysfs_lock &xa->xa_lock#10 pool_lock#2 irq_context: 0 &lock irq_context: 0 &lock nullb_indexes.xa_lock irq_context: 0 &q->sysfs_dir_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock irq_context: 0 &disk->open_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex rcu_read_lock &ret->b_uptodate_lock irq_context: 0 ctx_list.lock irq_context: 0 nfc_index_ida.xa_lock irq_context: 0 nfc_devlist_mutex irq_context: 0 nfc_devlist_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex irq_context: 0 nfc_devlist_mutex gdp_mutex &k->list_lock irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim irq_context: 0 nfc_devlist_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfc_devlist_mutex gdp_mutex pool_lock#2 irq_context: 0 nfc_devlist_mutex gdp_mutex lock irq_context: 0 nfc_devlist_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex lock irq_context: 0 nfc_devlist_mutex lock kernfs_idr_lock irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 nfc_devlist_mutex bus_type_sem irq_context: 0 nfc_devlist_mutex &pcp->lock &zone->lock irq_context: 0 nfc_devlist_mutex &zone->lock irq_context: 0 nfc_devlist_mutex &____s->seqcount irq_context: 0 nfc_devlist_mutex sysfs_symlink_target_lock irq_context: 0 nfc_devlist_mutex &root->kernfs_rwsem irq_context: 0 nfc_devlist_mutex &dev->power.lock irq_context: 0 nfc_devlist_mutex dpm_list_mtx irq_context: 0 nfc_devlist_mutex uevent_sock_mutex irq_context: 0 nfc_devlist_mutex &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 nfc_devlist_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfc_devlist_mutex running_helpers_waitq.lock irq_context: 0 nfc_devlist_mutex &rq->__lock irq_context: 0 nfc_devlist_mutex subsys mutex#40 irq_context: 0 nfc_devlist_mutex subsys mutex#40 &k->k_lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex rfkill_global_mutex irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim irq_context: 0 &dev->mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex &c->lock irq_context: 0 &dev->mutex rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rfkill_global_mutex &zone->lock irq_context: 0 &dev->mutex rfkill_global_mutex &____s->seqcount irq_context: 0 &dev->mutex rfkill_global_mutex 
&k->list_lock irq_context: 0 &dev->mutex rfkill_global_mutex lock irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex bus_type_sem irq_context: 0 &dev->mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &dev->mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &dev->mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &dev->mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rfkill_global_mutex &n->list_lock irq_context: 0 &dev->mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 &dev->mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rfkill_global_mutex &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex subsys mutex#41 irq_context: 0 &dev->mutex rfkill_global_mutex subsys mutex#41 &k->k_lock irq_context: 0 &dev->mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex leds_list_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) 
rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rfkill_global_mutex.wait_lock irq_context: 0 &dev->mutex &p->pi_lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex &rfkill->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pool->lock &base->lock irq_context: 0 &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rfkill_global_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex rfkill_global_mutex rcu_read_lock pool_lock#2 irq_context: 0 misc_mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 misc_mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 dma_heap_minors.xa_lock irq_context: 0 subsys mutex#42 irq_context: 0 subsys mutex#42 &k->k_lock irq_context: 0 heap_list_lock irq_context: softirq &(&krcp->monitor_work)->timer irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&tbl->managed_work)->timer irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback pool_lock#2 irq_context: 0 &dev->mutex host_index_ida.xa_lock irq_context: 0 &dev->mutex fs_reclaim &rq->__lock irq_context: 0 &dev->mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 &dev->mutex fs_reclaim &obj_hash[i].lock irq_context: 0 &dev->mutex kthread_create_lock irq_context: 0 &dev->mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &x->wait irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
&dev->mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#9 irq_context: 0 &dev->mutex wq_pool_mutex irq_context: 0 &dev->mutex wq_pool_mutex &wq->mutex irq_context: 0 &dev->mutex &md->mutex &c->lock irq_context: 0 &dev->mutex &md->mutex &____s->seqcount irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &n->list_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex &n->list_lock irq_context: 0 &dev->mutex &md->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &md->mutex &zone->lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pool_lock#2 irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &c->lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex &n->list_lock irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex scsi_sense_cache_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex &n->list_lock irq_context: 0 &dev->mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex subsys mutex#43 irq_context: 0 &dev->mutex subsys mutex#44 irq_context: 0 &dev->mutex subsys mutex#44 &k->k_lock irq_context: 0 &dev->mutex attribute_container_mutex irq_context: 0 &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &virtscsi_vq->vq_lock irq_context: 0 &dev->mutex &shost->scan_mutex irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim irq_context: 0 &dev->mutex &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &shost->scan_mutex pool_lock#2 irq_context: 0 &dev->mutex &shost->scan_mutex shost->host_lock irq_context: 0 &dev->mutex async_scan_lock irq_context: 0 &dev->mutex async_scan_lock &x->wait#15 irq_context: 0 &dev->mutex async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex shost->host_lock 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex attribute_container_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &xa->xa_lock#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock irq_context: hardirq &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#16 irq_context: softirq &x->wait#16 &p->pi_lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#16 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &x->wait#16 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (&q->timeout) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&q->timeout_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &xa->xa_lock#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->unused_hctx_lock 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->requeue_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&sdev->event_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &sdev->inquiry_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 subsys mutex#45 irq_context: 0 subsys mutex#45 &k->list_lock irq_context: 0 subsys mutex#45 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &tags->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nvmf_hosts_mutex irq_context: 0 
subsys mutex#46 irq_context: 0 subsys mutex#46 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex (work_completion)(&(&hctx->run_work)->work) &rq->__lock irq_context: 0 nvmf_transports_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock rcu_node_0 irq_context: 0 subsys mutex#47 irq_context: 0 subsys mutex#47 &k->k_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &____s->seqcount irq_context: 0 nvmet_config_sem irq_context: 0 subsys mutex#48 irq_context: 0 subsys mutex#48 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 
&default_group_class[depth - 1]#3/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 
tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &sb->s_type->i_lock_key#19 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 
&default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6 configfs_dirent_lock irq_context: 0 &sb->s_type->i_mutex_key#7/1 &default_group_class[depth - 1]/2 &default_group_class[depth - 1]#3/2 &default_group_class[depth - 1]#4/2 &default_group_class[depth - 1]#5/2 &default_group_class[depth - 1]#6/2 irq_context: 0 backend_mutex irq_context: 0 scsi_mib_index_lock irq_context: 0 hba_lock irq_context: 0 device_mutex irq_context: 0 device_mutex fs_reclaim irq_context: 0 device_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 device_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_callback percpu_ref_switch_lock irq_context: 0 &hba->device_lock irq_context: 0 rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 mtd_table_mutex irq_context: 0 part_parser_lock irq_context: 0 (kmod_concurrent_max).lock irq_context: 0 &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sig->wait_chldexit irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sig->wait_chldexit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_read_lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &prev->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&sub_info->work) &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) css_set_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock input_pool.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->mq_freeze_lock &rq->__lock irq_context: 0 mtd_table_mutex fs_reclaim irq_context: 0 mtd_table_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex pool_lock#2 irq_context: 0 mtd_table_mutex &x->wait#9 irq_context: 0 mtd_table_mutex &obj_hash[i].lock irq_context: 0 mtd_table_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex irq_context: 0 mtd_table_mutex gdp_mutex &k->list_lock irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim irq_context: 0 mtd_table_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex gdp_mutex pool_lock#2 irq_context: 0 mtd_table_mutex gdp_mutex lock irq_context: 0 mtd_table_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex lock irq_context: 0 mtd_table_mutex lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex bus_type_sem irq_context: 0 mtd_table_mutex sysfs_symlink_target_lock irq_context: 0 mtd_table_mutex &c->lock irq_context: 0 mtd_table_mutex &____s->seqcount irq_context: 0 mtd_table_mutex &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &dev->power.lock irq_context: 0 mtd_table_mutex dpm_list_mtx 
irq_context: 0 mtd_table_mutex req_lock irq_context: 0 mtd_table_mutex &p->pi_lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock rcu_read_lock rcu_node_0 irq_context: 0 mtd_table_mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock rcu_read_lock &rq->__lock irq_context: 0 mtd_table_mutex &rq->__lock irq_context: 0 mtd_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex uevent_sock_mutex irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mtd_table_mutex running_helpers_waitq.lock irq_context: 0 mtd_table_mutex subsys mutex#49 irq_context: 0 mtd_table_mutex subsys mutex#49 &k->k_lock irq_context: 0 mtd_table_mutex devtree_lock irq_context: 0 mtd_table_mutex nvmem_ida.xa_lock irq_context: 0 mtd_table_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 mtd_table_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 mtd_table_mutex nvmem_cell_mutex irq_context: 0 mtd_table_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 mtd_table_mutex &k->k_lock irq_context: 0 mtd_table_mutex &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &zone->lock irq_context: 0 mtd_table_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock &rq->__lock irq_context: 0 mtd_table_mutex &dev->mutex &dev->power.lock irq_context: 0 mtd_table_mutex &dev->mutex &k->list_lock irq_context: 0 mtd_table_mutex &dev->mutex &k->k_lock irq_context: 0 mtd_table_mutex subsys mutex#50 irq_context: 0 mtd_table_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &obj_hash[i].lock pool_lock irq_context: 0 mtd_table_mutex (console_sem).lock irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner_lock irq_context: 
0 mtd_table_mutex console_lock console_srcu console_owner irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 mtd_table_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 mtd_table_mutex pcpu_alloc_mutex irq_context: 0 mtd_table_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex cpu_hotplug_lock irq_context: 0 mtd_table_mutex batched_entropy_u32.lock irq_context: 0 mtd_table_mutex mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 mtd_table_mutex blk_queue_ida.xa_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock &q->unused_hctx_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock irq_context: 0 mtd_table_mutex &q->sysfs_lock cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_lock &xa->xa_lock#10 irq_context: 0 mtd_table_mutex &set->tag_list_lock irq_context: 0 mtd_table_mutex bio_slab_lock irq_context: 0 mtd_table_mutex percpu_counters_lock irq_context: 0 mtd_table_mutex &sb->s_type->i_lock_key#3 irq_context: 0 mtd_table_mutex &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &xa->xa_lock#9 irq_context: 0 mtd_table_mutex lock &q->queue_lock irq_context: 0 mtd_table_mutex lock &q->queue_lock &blkcg->lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex set->srcu irq_context: 0 mtd_table_mutex percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->queue_lock irq_context: 0 mtd_table_mutex &q->queue_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->queue_lock pcpu_lock irq_context: 0 mtd_table_mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->queue_lock percpu_counters_lock irq_context: 0 mtd_table_mutex &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 mtd_table_mutex &q->queue_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 mtd_table_mutex &q->queue_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 mtd_table_mutex &q->queue_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 mtd_table_mutex &q->queue_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 mtd_table_mutex &bdev->bd_size_lock irq_context: 0 mtd_table_mutex elv_list_lock irq_context: 0 mtd_table_mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 mtd_table_mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 mtd_table_mutex &n->list_lock irq_context: 0 mtd_table_mutex &q->debugfs_mutex irq_context: 0 mtd_table_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 mtd_table_mutex subsys mutex#37 irq_context: 0 mtd_table_mutex subsys mutex#37 &k->k_lock irq_context: 0 mtd_table_mutex dev_hotplug_mutex irq_context: 0 mtd_table_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 
mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex set->srcu irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex percpu_ref_switch_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex pin_fs_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 mtd_table_mutex 
&q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 mtd_table_mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 mtd_table_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex attribute_container_mutex &rq->__lock irq_context: 0 mtd_table_mutex subsys mutex#38 irq_context: 0 mtd_table_mutex subsys mutex#38 &k->k_lock irq_context: 0 mtd_table_mutex cgwb_lock irq_context: 0 mtd_table_mutex bdi_lock irq_context: 0 mtd_table_mutex inode_hash_lock irq_context: 0 mtd_table_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex stack_depot_init_mutex irq_context: 0 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex kthread_create_lock irq_context: 0 rtnl_mutex &p->pi_lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &x->wait irq_context: 0 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex irq_context: 0 rtnl_mutex crngs.lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 irq_context: 0 rtnl_mutex net_rwsem irq_context: 0 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &x->wait#9 irq_context: 0 rtnl_mutex &k->list_lock irq_context: 0 rtnl_mutex gdp_mutex irq_context: 0 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex lock irq_context: 0 rtnl_mutex lock 
kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex bus_type_sem irq_context: 0 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &dev->power.lock irq_context: 0 rtnl_mutex dpm_list_mtx irq_context: 0 rtnl_mutex uevent_sock_mutex irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex running_helpers_waitq.lock irq_context: 0 rtnl_mutex subsys mutex#17 irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock kfence_freelist_lock irq_context: 0 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_hotplug_mutex irq_context: 0 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex dev_base_lock irq_context: 0 rtnl_mutex input_pool.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex batched_entropy_u32.lock irq_context: 0 rtnl_mutex &tbl->lock irq_context: 0 rtnl_mutex sysctl_lock irq_context: 0 rtnl_mutex nl_table_lock irq_context: 0 rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)gid-cache-wq irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock irq_context: 0 rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex &meta->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 once_lock irq_context: 0 once_lock crngs.lock irq_context: 0 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work) cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work) &obj_hash[i].lock irq_context: 0 (inet6addr_validator_chain).rwsem irq_context: 0 (inetaddr_validator_chain).rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 subsys mutex#51 irq_context: 0 subsys mutex#51 &k->k_lock irq_context: 0 gpio_lookup_lock irq_context: 0 mdio_board_lock irq_context: 0 mode_list_lock irq_context: 0 tasklist_lock &n->list_lock irq_context: 0 &dev->mutex stack_depot_init_mutex irq_context: 0 &dev->mutex napi_hash_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex &md->mutex &domain->mutex sparse_irq_lock lock kernfs_idr_lock pool_lock#2 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex fs_reclaim &rq->__lock irq_context: 0 &dev->mutex cpu_hotplug_lock &md->mutex irq_context: 0 &dev->mutex cpu_hotplug_lock &irq_desc_lock_class irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex pool_lock#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex cpu_hotplug_lock xps_map_mutex krc.lock irq_context: 0 &dev->mutex rtnl_mutex irq_context: 0 &dev->mutex rtnl_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rtnl_mutex &zone->lock irq_context: 0 &dev->mutex rtnl_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex pool_lock#2 irq_context: hardirq &irq_desc_lock_class tmp_mask_lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: hardirq &irq_desc_lock_class tmp_mask_lock vector_lock irq_context: 0 &dev->mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 &dev->mutex rtnl_mutex net_rwsem irq_context: 0 &dev->mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &dev->mutex rtnl_mutex &x->wait#9 irq_context: 0 &dev->mutex rtnl_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 &dev->mutex 
rtnl_mutex lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex rtnl_mutex bus_type_sem irq_context: 0 &dev->mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex rtnl_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex rtnl_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dpm_list_mtx irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex rtnl_mutex &rq->__lock irq_context: 0 &dev->mutex rtnl_mutex &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex &dir->lock#2 irq_context: 0 &dev->mutex rtnl_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex rtnl_mutex kfence_freelist_lock irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 &dev->mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &dev->mutex rtnl_mutex dev_base_lock irq_context: 0 &dev->mutex rtnl_mutex input_pool.lock irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 &dev->mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &dev->mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 &dev->mutex rtnl_mutex &tbl->lock irq_context: 0 &dev->mutex rtnl_mutex sysctl_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_lock irq_context: 0 &dev->mutex rtnl_mutex nl_table_wait.lock irq_context: hardirq &irq_desc_lock_class vector_lock irq_context: 0 &dev->mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 &dev->mutex lweventlist_lock irq_context: 0 &dev->mutex lweventlist_lock pool_lock#2 irq_context: 0 &dev->mutex lweventlist_lock &dir->lock#2 irq_context: 0 &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&vi->config_work) irq_context: 0 l3mdev_lock irq_context: 0 subsys mutex#52 irq_context: 0 subsys mutex#52 &k->k_lock irq_context: 0 compressor_list_lock irq_context: 0 compressor_list_lock pool_lock#2 irq_context: 0 compressor_list_lock &pcp->lock &zone->lock irq_context: 0 compressor_list_lock &zone->lock irq_context: 0 compressor_list_lock &____s->seqcount irq_context: 0 compressor_list_lock rcu_read_lock pool_lock#2 irq_context: 0 compressor_list_lock &obj_hash[i].lock irq_context: 0 uevent_sock_mutex &rq->__lock irq_context: 0 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 hwsim_radio_lock irq_context: 0 subsys mutex#53 irq_context: 0 subsys mutex#53 &k->k_lock irq_context: 0 deferred_probe_mutex irq_context: 0 rtnl_mutex param_lock irq_context: 0 rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 rtnl_mutex (console_sem).lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock 
irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx running_helpers_waitq.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#54 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#54 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 rtnl_mutex &base->lock irq_context: 0 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex irq_context: 0 rfkill_global_mutex fs_reclaim irq_context: 0 rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rfkill_global_mutex pool_lock#2 irq_context: 0 rfkill_global_mutex &k->list_lock irq_context: 0 rfkill_global_mutex lock irq_context: 0 rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rfkill_global_mutex bus_type_sem irq_context: 0 rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 rfkill_global_mutex &c->lock irq_context: 0 rfkill_global_mutex &____s->seqcount irq_context: 0 rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rfkill_global_mutex &dev->power.lock irq_context: 0 rfkill_global_mutex dpm_list_mtx irq_context: 0 rfkill_global_mutex &rfkill->lock irq_context: 0 rfkill_global_mutex uevent_sock_mutex irq_context: 0 rfkill_global_mutex &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 
&p->pi_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex running_helpers_waitq.lock irq_context: 0 rfkill_global_mutex &k->k_lock irq_context: 0 rfkill_global_mutex subsys mutex#41 irq_context: 0 rfkill_global_mutex subsys mutex#41 &k->k_lock irq_context: 0 rfkill_global_mutex triggers_list_lock irq_context: 0 rfkill_global_mutex leds_list_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill_global_led_trigger_work) rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rfkill_global_mutex &cfs_rq->removed.lock irq_context: 0 rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) rfkill_global_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rfkill->sync_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys 
mutex#17 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock pool_lock#2 irq_context: 0 rfkill_global_mutex &pcp->lock &zone->lock irq_context: 0 rfkill_global_mutex &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &set->tag_list_lock &rq->__lock irq_context: 0 &dev->mutex crngs.lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &zone->lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 &dev->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#55 irq_context: 0 &dev->mutex rtnl_mutex subsys mutex#55 &k->k_lock irq_context: 0 &dev->mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 &dev->mutex rtnl_mutex crngs.lock irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 &dev->mutex rtnl_mutex &sdata->sec_mtx &sec->lock irq_context: 0 &dev->mutex rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex &local->iflist_mtx#2 irq_context: 0 &dev->mutex hwsim_phys_lock irq_context: 0 &dev->mutex nl_table_lock irq_context: 0 &dev->mutex nl_table_wait.lock irq_context: 0 &dev->mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &c->lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &zone->lock irq_context: 0 &dev->mutex rtnl_mutex gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 &dev->mutex rtnl_mutex &n->list_lock irq_context: 0 &dev->mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim irq_context: 0 &dev->mutex hwsim_phys_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex hwsim_phys_lock pool_lock#2 irq_context: 0 &dev->mutex hwsim_phys_lock &____s->seqcount irq_context: 0 &dev->mutex hwsim_phys_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex hwsim_phys_lock &zone->lock irq_context: 0 &dev->mutex hwsim_phys_lock rcu_read_lock 
pool_lock#2 irq_context: 0 &dev->mutex hwsim_phys_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &meta->lock irq_context: 0 &sig->cred_guard_mutex kfence_freelist_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 fs_reclaim &cfs_rq->removed.lock irq_context: 0 fs_reclaim &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &q->sysfs_lock cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#15 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex 
running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#43 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex async_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&shost->scan_mutex subsys mutex#43 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bio_slab_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex sd_index_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#56 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#56 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &shost->scan_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock sg_index_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock sg_index_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem 
irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 uevent_sock_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 running_helpers_waitq.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 subsys mutex#57 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 subsys mutex#57 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 (console_sem).lock irq_context: 
0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#45 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex bsg_minor_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex chrdevs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex &x->wait#11 irq_context: 0 xdomain_lock irq_context: 0 xdomain_lock fs_reclaim irq_context: 0 xdomain_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 xdomain_lock pool_lock#2 irq_context: 0 ioctl_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex elv_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim &rq->__lock irq_context: 0 address_handler_list_lock irq_context: 0 card_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#58 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &shost->scan_mutex subsys mutex#58 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) async_scan_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->debugfs_mutex irq_context: 0 lock pidmap_lock &c->lock irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock irq_context: 0 lock pidmap_lock &zone->lock irq_context: 0 lock pidmap_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &x->wait#11 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex 
pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock 
&q->sysfs_lock &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock percpu_ref_switch_lock rcu_read_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->mq_freeze_lock &q->mq_freeze_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex pin_fs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) 
&dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &q->rq_qos_mutex &q->debugfs_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &q->sysfs_dir_lock &q->sysfs_lock &stats->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#38 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex subsys mutex#38 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex cgwb_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex bdev_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#16 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex (&timer.timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex &disk->open_mutex &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock &q->sysfs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &q->sysfs_dir_lock &q->sysfs_lock &rq->__lock irq_context: 0 subsys mutex#59 irq_context: 0 subsys mutex#59 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &x->wait#18 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &bdev->bd_size_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex free_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex init_mm.page_table_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#4 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &mapping->private_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dd->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &folio_wait_table[i] irq_context: 0 (wq_completion)kblockd irq_context: 
0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &x->wait#18 &p->pi_lock irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#18 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &txlock irq_context: 0 &txlock &list->lock#3 irq_context: 0 &txlock &txwq irq_context: softirq &ret->b_uptodate_lock irq_context: softirq &folio_wait_table[i] irq_context: softirq &folio_wait_table[i] &p->pi_lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex bus_type_sem irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&entry->work) &dev->mutex &disk->open_mutex &dev->power.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex req_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &x->wait#11 irq_context: 0 &iocq[i].lock irq_context: 0 &iocq[i].lock &ktiowq[i] irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#37 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex subsys mutex#37 &k->k_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &xa->xa_lock#9 pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex inode_hash_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex purge_vmap_area_lock irq_context: 0 &txwq irq_context: 0 &txwq &p->pi_lock irq_context: 0 &txwq &p->pi_lock &rq->__lock irq_context: 0 &txwq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &dev->mutex &disk->open_mutex lock#5 irq_context: 0 rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh pool_lock#2 irq_context: 0 subsys mutex#60 irq_context: 0 subsys mutex#60 &k->k_lock irq_context: 0 usb_bus_idr_lock irq_context: 0 usb_bus_idr_lock (usb_notifier_list).rwsem irq_context: 0 rcu_read_lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock irq_context: 0 table_lock &k->list_lock irq_context: 0 table_lock fs_reclaim irq_context: 0 table_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 table_lock pool_lock#2 irq_context: 0 table_lock lock irq_context: 0 table_lock lock kernfs_idr_lock irq_context: 0 table_lock &root->kernfs_rwsem irq_context: 0 table_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 table_lock &k->k_lock irq_context: 0 table_lock uevent_sock_mutex irq_context: 0 table_lock &c->lock irq_context: 0 table_lock &pcp->lock &zone->lock irq_context: 0 table_lock &zone->lock irq_context: 0 table_lock &____s->seqcount irq_context: 0 table_lock &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 table_lock running_helpers_waitq.lock irq_context: 0 table_lock (console_sem).lock irq_context: 0 table_lock console_lock console_srcu console_owner_lock irq_context: 0 table_lock console_lock console_srcu console_owner irq_context: 0 table_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 table_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 table_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 table_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 table_lock &obj_hash[i].lock pool_lock irq_context: 0 table_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 table_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 table_lock &rq->__lock irq_context: 0 table_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem pin_fs_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex 
(usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#9 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem bus_type_sem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &dev->power.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem dpm_list_mtx irq_context: 0 &dev->mutex (usb_notifier_list).rwsem req_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &x->wait#11 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem uevent_sock_mutex irq_context: 0 &dev->mutex (usb_notifier_list).rwsem 
rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#60 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem subsys mutex#60 &k->k_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem mon_lock irq_context: 0 &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock irq_context: softirq &bh->lock irq_context: softirq lock#6 irq_context: softirq lock#6 kcov_remote_lock irq_context: softirq &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock input_pool.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock req_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock irq_context: 0 
&dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#11 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex set_config_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex devtree_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex 
usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &new_driver->dynids.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock 
irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex (console_sem).lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex 
&hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &k->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex bus_type_sem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dpm_list_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &k->k_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &dev->power.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex 
usb_port_peer_mutex dev_pm_qos_mtx pm_qos_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex component_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex device_links_srcu irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&hub->init_work)->work) &dev->mutex &lock->wait_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex subsys mutex#61 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex (usb_notifier_list).rwsem irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &lock->wait_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &dum_hcd->dum->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &bh->lock irq_context: 0 
(wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &x->wait#19 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#19 &p->pi_lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#19 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex (&timer.timer) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex hcd_root_hub_lock hcd_urb_list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)usb_hub_wq irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock irq_context: 0 (wq_completion)usb_hub_wq (work_completion)(&hub->events) &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex usb_bus_idr_lock subsys mutex#61 irq_context: 0 &dev->mutex usb_bus_idr_lock &x->wait#9 irq_context: 0 &dev->mutex usb_bus_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &hub->irq_urb_lock irq_context: 0 &dev->mutex usb_bus_idr_lock (&hub->irq_urb_retry) irq_context: 0 &dev->mutex usb_bus_idr_lock &base->lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_unlink_lock irq_context: softirq usb_kill_urb_queue.lock irq_context: 0 
&dev->mutex usb_bus_idr_lock (work_completion)(&hub->tt.clear_work) irq_context: 0 &dev->mutex usb_bus_idr_lock &dum_hcd->dum->lock irq_context: 0 &dev->mutex usb_bus_idr_lock device_state_lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_urb_list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem lock kernfs_idr_lock pool_lock#2 irq_context: softirq rcu_callback &base->lock irq_context: softirq rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 &dev->mutex usb_bus_idr_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &c->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &zone->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock hcd_root_hub_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 
&dev->mutex usb_bus_idr_lock batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &obj_hash[i].lock pool_lock irq_context: softirq lib/debugobjects.c:101 irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq lib/debugobjects.c:101 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (debug_obj_work).work irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex udc_lock irq_context: 0 &dev->mutex subsys mutex#62 irq_context: 0 &dev->mutex subsys mutex#62 &k->k_lock irq_context: 0 &dev->mutex gadget_id_numbers.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&gadget->work) kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events kernfs_notify_work irq_context: 0 (wq_completion)events kernfs_notify_work kernfs_notify_lock irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem irq_context: 0 &dev->mutex subsys mutex#63 irq_context: 0 &dev->mutex gdp_mutex &c->lock irq_context: 0 &dev->mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex gdp_mutex &zone->lock irq_context: 0 &dev->mutex gdp_mutex &____s->seqcount irq_context: 0 &dev->mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 func_lock irq_context: 0 g_tf_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 misc_mtx &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex 
&vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &vhci_hcd->vhci->lock irq_context: 0 &dev->mutex usb_bus_idr_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex &hub->status_mutex &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &zone->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hub->init_work)->work) &dev->mutex &hub->status_mutex &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex dev_pm_qos_sysfs_mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex hcd->bandwidth_mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &n->list_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &pcp->lock 
&zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex lock kernfs_idr_lock pool_lock#2 irq_context: softirq net/core/link_watch.c:31 irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/core/link_watch.c:31 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock quarantine_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex batched_entropy_u8.lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex kfence_freelist_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &meta->lock irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex (usb_notifier_list).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 
&dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex usb_port_peer_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &dev->mutex usb_bus_idr_lock &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &dev->mutex &dev->mutex probe_waitqueue.lock irq_context: 0 i8042_lock irq_context: 0 &dev->mutex i8042_lock irq_context: 0 &dev->mutex i8042_lock (console_sem).lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex i8042_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 &dev->mutex &x->wait#20 irq_context: hardirq i8042_lock &x->wait#20 irq_context: hardirq i8042_lock &x->wait#20 &p->pi_lock irq_context: hardirq i8042_lock &x->wait#20 &p->pi_lock &rq->__lock irq_context: hardirq i8042_lock &x->wait#20 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex (&timer.timer) irq_context: 0 &dev->mutex &desc->request_mutex &irq_desc_lock_class irq_resend_lock irq_context: 0 &dev->mutex &desc->request_mutex proc_subdir_lock irq_context: 0 &dev->mutex &desc->request_mutex &ent->pde_unload_lock irq_context: 0 &dev->mutex &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex &desc->request_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &desc->request_mutex pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock irq_context: 0 &dev->mutex serio_event_lock pool_lock#2 irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long irq_context: 0 (wq_completion)events_long serio_event_work irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex serio_event_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->list_lock irq_context: 0 (wq_completion)events_long 
serio_event_work serio_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &device->physical_node_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex semaphore->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex subsys mutex#64 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex lock kernfs_idr_lock 
pool_lock#2 irq_context: 0 input_ida.xa_lock irq_context: 0 input_ida.xa_lock pool_lock#2 irq_context: 0 subsys mutex#30 irq_context: 0 subsys mutex#30 &k->k_lock irq_context: 0 input_mutex input_ida.xa_lock irq_context: 0 input_mutex fs_reclaim irq_context: 0 input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 input_mutex pool_lock#2 irq_context: 0 input_mutex &x->wait#9 irq_context: 0 input_mutex &obj_hash[i].lock irq_context: 0 input_mutex &dev->mutex#2 irq_context: 0 input_mutex chrdevs_lock irq_context: 0 input_mutex &k->list_lock irq_context: 0 input_mutex lock irq_context: 0 input_mutex lock kernfs_idr_lock irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 input_mutex bus_type_sem irq_context: 0 input_mutex sysfs_symlink_target_lock irq_context: 0 input_mutex &root->kernfs_rwsem irq_context: 0 input_mutex &dev->power.lock irq_context: 0 input_mutex dpm_list_mtx irq_context: 0 input_mutex req_lock irq_context: 0 input_mutex &p->pi_lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock irq_context: 0 input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 input_mutex &x->wait#11 irq_context: 0 input_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex uevent_sock_mutex irq_context: 0 input_mutex &obj_hash[i].lock pool_lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 input_mutex running_helpers_waitq.lock irq_context: 0 input_mutex &k->k_lock irq_context: 0 input_mutex subsys mutex#30 irq_context: 0 input_mutex subsys mutex#30 &k->k_lock irq_context: 0 input_mutex &c->lock irq_context: 0 input_mutex &pcp->lock &zone->lock irq_context: 0 input_mutex &zone->lock irq_context: 0 input_mutex &____s->seqcount irq_context: 0 input_mutex &cfs_rq->removed.lock irq_context: 0 serio_event_lock irq_context: 0 serio_event_lock pool_lock#2 irq_context: 0 serio_event_lock rcu_read_lock &pool->lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 serio_event_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_srcu irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fwnode_link_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex device_links_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &obj_hash[i].lock irq_context: hardirq &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &new_driver->dynids.lock irq_context: 0 &dev->mutex rtc_ida.xa_lock irq_context: 0 &dev->mutex rtc_lock irq_context: 0 &dev->mutex &rtc->ops_lock irq_context: 0 &dev->mutex &rtc->ops_lock rtc_lock irq_context: 0 &dev->mutex 
chrdevs_lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &c->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &zone->lock irq_context: 0 &dev->mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &dev->mutex req_lock irq_context: 0 &dev->mutex &x->wait#11 irq_context: 0 &dev->mutex subsys mutex#27 irq_context: 0 &dev->mutex subsys mutex#27 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &x->wait#9 irq_context: 0 &dev->mutex subsys mutex#27 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 platform_devid_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 lock irq_context: 0 &dev->mutex subsys mutex#27 lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 bus_type_sem irq_context: 0 &dev->mutex subsys mutex#27 sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 dpm_list_mtx irq_context: 0 &dev->mutex subsys mutex#27 &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex subsys mutex#27 running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &dev->power.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_srcu irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex device_links_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex deferred_probe_mutex irq_context: 0 
&dev->mutex subsys mutex#27 &dev->mutex &c->lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &zone->lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex subsys mutex#27 &dev->mutex probe_waitqueue.lock irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#3 irq_context: 0 &dev->mutex subsys mutex#27 &c->lock irq_context: 0 &dev->mutex subsys mutex#27 &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#27 wakeup_ida.xa_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &k->list_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex subsys mutex#27 gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex subsys mutex#27 &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 irq_context: 0 &dev->mutex subsys mutex#27 subsys mutex#15 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#27 events_lock irq_context: 0 &dev->mutex subsys mutex#27 rtcdev_lock irq_context: softirq &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 g_smscore_deviceslock irq_context: 0 g_smscore_deviceslock fs_reclaim irq_context: 0 g_smscore_deviceslock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 g_smscore_deviceslock pool_lock#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cx231xx_devlist_mutex irq_context: 0 em28xx_devlist_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex (&timer.timer) irq_context: hardirq &serio->lock &ps2dev->wait irq_context: hardirq &serio->lock &ps2dev->wait &p->pi_lock irq_context: hardirq &serio->lock &ps2dev->wait &p->pi_lock &rq->__lock irq_context: hardirq &serio->lock &ps2dev->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex sysfs_symlink_target_lock irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access 
rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#65 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access subsys mutex#65 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access leds_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &trig->leddev_list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &dev->event_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work 
serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access triggers_list_lock &led_cdev->trigger_lock running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &led_cdev->led_access rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex chrdevs_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem irq_context: 0 
(wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex input_mutex input_devices_poll_wait.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex deferred_probe_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex probe_waitqueue.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex quarantine_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex psmouse_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &serio->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex i8042_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &ps2dev->wait irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex (&timer.timer) irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &pool->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pvr2_context_sync_data.lock irq_context: 0 &dev->mutex core_lock irq_context: 0 &dev->mutex core_lock fs_reclaim irq_context: 0 &dev->mutex core_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex core_lock pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem i2c_dev_list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#9 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem chrdevs_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &k->list_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex 
&root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem bus_type_sem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &dev->power.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem dpm_list_mtx irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem req_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &x->wait#11 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem fs_reclaim &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem uevent_sock_mutex irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem uevent_sock_mutex &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &c->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &zone->lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &____s->seqcount irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem running_helpers_waitq.lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem &k->k_lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#66 irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#66 &rq->__lock irq_context: 0 &dev->mutex &(&priv->bus_notifier)->rwsem subsys mutex#66 &k->k_lock irq_context: 0 &dev->mutex subsys mutex#67 irq_context: 0 &dev->mutex core_lock &k->list_lock irq_context: 0 &dev->mutex core_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock irq_context: 0 &dev->mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex (kmod_concurrent_max).lock irq_context: 0 &dev->mutex 
&x->wait#17 irq_context: hardirq &serio->lock &dev->power.lock irq_context: hardirq &serio->lock &dev->event_lock#2 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.lock/1 irq_context: 0 &dev->mutex &dev->mutex &dev->power.lock &dev->power.wait_queue irq_context: 0 &dev->mutex &dev->mutex device_links_srcu irq_context: 0 &dev->mutex &dev->mutex fwnode_link_lock irq_context: 0 &dev->mutex &dev->mutex device_links_lock irq_context: 0 &dev->mutex &dev->mutex &(&priv->bus_notifier)->rwsem irq_context: 0 &dev->mutex &dev->mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex &dev->mutex fs_reclaim irq_context: 0 &dev->mutex &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &dev->mutex pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex lock irq_context: 0 &dev->mutex &dev->mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &dev->mutex &dev->devres_lock irq_context: 0 &dev->mutex &dev->mutex deferred_probe_mutex irq_context: 0 &dev->mutex &dev->mutex uevent_sock_mutex irq_context: 0 &dev->mutex &dev->mutex &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex &dev->mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex &dev->mutex &c->lock irq_context: 0 &dev->mutex &dev->mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex &dev->mutex &zone->lock irq_context: 0 &dev->mutex &dev->mutex &____s->seqcount irq_context: 0 &dev->mutex &dev->mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex irq_context: 0 &dev->mutex frontend_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex (console_sem).lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex frontend_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &xa->xa_lock#11 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock (console_sem).lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &zone->lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &x->wait#11 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock 
&pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#68 irq_context: 0 &dev->mutex frontend_mutex dvbdev_register_lock subsys mutex#68 &k->k_lock irq_context: 0 &dev->mutex init_mm.page_table_lock irq_context: 0 &dev->mutex &dmxdev->lock irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock minor_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#11 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex irq_context: 0 &dev->mutex dvbdev_register_lock &c->lock irq_context: 0 &dev->mutex dvbdev_register_lock &xa->xa_lock#11 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex &zone->lock irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex &____s->seqcount irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock &mdev->graph_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#9 irq_context: 0 &dev->mutex dvbdev_register_lock &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->list_lock irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex irq_context: 0 &dev->mutex dvbdev_register_lock gdp_mutex &k->list_lock irq_context: 0 &dev->mutex dvbdev_register_lock lock irq_context: 0 &dev->mutex dvbdev_register_lock lock kernfs_idr_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock bus_type_sem irq_context: 0 &dev->mutex dvbdev_register_lock sysfs_symlink_target_lock irq_context: 0 &dev->mutex dvbdev_register_lock &root->kernfs_rwsem irq_context: 0 &dev->mutex dvbdev_register_lock &dev->power.lock irq_context: 0 &dev->mutex dvbdev_register_lock dpm_list_mtx irq_context: 0 &dev->mutex dvbdev_register_lock req_lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex dvbdev_register_lock &rq->__lock irq_context: 0 &dev->mutex dvbdev_register_lock &x->wait#11 irq_context: 0 
&dev->mutex dvbdev_register_lock uevent_sock_mutex irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex dvbdev_register_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex dvbdev_register_lock running_helpers_waitq.lock irq_context: 0 &dev->mutex dvbdev_register_lock &k->k_lock irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#68 irq_context: 0 &dev->mutex dvbdev_register_lock subsys mutex#68 &k->k_lock irq_context: 0 &dev->mutex &dvbdemux->mutex irq_context: 0 &dev->mutex media_devnode_lock irq_context: 0 &dev->mutex subsys mutex#69 irq_context: 0 &dev->mutex videodev_lock irq_context: 0 &dev->mutex subsys mutex#70 irq_context: 0 &dev->mutex subsys mutex#70 &k->k_lock irq_context: 0 &dev->mutex &xa->xa_lock#11 irq_context: 0 &dev->mutex &mdev->graph_mutex irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim irq_context: 0 &dev->mutex &mdev->graph_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &mdev->graph_mutex pool_lock#2 irq_context: 0 &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &dev->mutex vimc_sensor:393:(&vsensor->hdl)->_lock irq_context: 0 &dev->mutex &v4l2_dev->lock irq_context: 0 &dev->mutex vimc_debayer:578:(&vdebayer->hdl)->_lock irq_context: 0 &dev->mutex vimc_lens:61:(&vlens->hdl)->_lock irq_context: 0 &dev->mutex tk_core.seq.seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1618:(hdl_fb)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1620:(hdl_vid_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock pool_lock#2 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1622:(hdl_vid_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock vivid_ctrls:1625:(hdl_vbi_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1627:(hdl_vbi_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1630:(hdl_radio_rx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock vivid_ctrls:1632:(hdl_radio_tx)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1634:(hdl_sdr_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1636:(hdl_meta_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock 
irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1638:(hdl_meta_out)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock vivid_ctrls:1640:(hdl_tch_cap)->_lock irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &obj_hash[i].lock irq_context: 0 &adap->kthread_waitq irq_context: 0 &dev->mutex cec_devnode_lock irq_context: 0 &dev->cec_xfers_slock irq_context: 0 &dev->kthread_waitq_cec irq_context: 0 &dev->mutex subsys mutex#71 irq_context: 0 &dev->mutex pin_fs_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &dev->mutex &adap->lock irq_context: 0 &dev->mutex &adap->lock tk_core.seq.seqcount irq_context: 0 &dev->mutex &adap->lock &adap->devnode.lock_fhs irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex videodev_lock &rq->__lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &dev->mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &dev->mutex rcu_read_lock rcu_node_0 irq_context: 0 &dev->mutex vivid_ctrls:1606:(hdl_user_gen)->_lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1614:(hdl_sdtv_cap)->_lock &____s->seqcount irq_context: 0 &dev->mutex &meta->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex 
&serio->drv_mutex psmouse_mutex gdp_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 pool_lock#2 
irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_ida.xa_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#9 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->mutex#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex chrdevs_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->list_lock 
irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &c->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &zone->lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex bus_type_sem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &dev->power.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex dpm_list_mtx irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex req_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &x->wait#11 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex uevent_sock_mutex irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex 
rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex running_helpers_waitq.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex subsys mutex#30 &k->k_lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex &mousedev->mutex/1 irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &c->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &____s->seqcount irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex input_mutex input_devices_poll_wait.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex &serio->drv_mutex psmouse_mutex i8042_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long serio_event_work serio_mutex &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex vivid_ctrls:1612:(hdl_streaming)->_lock &zone->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &zone->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &zone->lock irq_context: 0 &dev->mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &pcp->lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1608:(hdl_user_vid)->_lock &zone->lock irq_context: 0 &dev->mutex vivid_ctrls:1610:(hdl_user_aud)->_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_node_0 irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock 
&c->lock irq_context: 0 &dev->mutex vivid_ctrls:1616:(hdl_loop_cap)->_lock &____s->seqcount irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 &dev->mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 ptp_clocks_map.xa_lock irq_context: 0 subsys mutex#72 irq_context: 0 subsys mutex#72 &k->k_lock irq_context: 0 pers_lock irq_context: 0 _lock irq_context: 0 dm_bufio_clients_lock irq_context: 0 _ps_lock irq_context: 0 _lock#2 irq_context: 0 _lock#3 irq_context: 0 register_lock#2 irq_context: 0 subsys mutex#73 irq_context: 0 subsys mutex#73 &k->k_lock irq_context: 0 bp_lock irq_context: 0 bp_lock irq_context: 0 subsys mutex#74 irq_context: 0 subsys mutex#74 &k->k_lock irq_context: 0 leds_list_lock &led_cdev->trigger_lock irq_context: 0 rtnl_mutex lock#7 irq_context: softirq (&dsp_spl_tl) irq_context: softirq (&dsp_spl_tl) dsp_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock irq_context: softirq (&dsp_spl_tl) dsp_lock iclock_lock tk_core.seq.seqcount irq_context: softirq (&dsp_spl_tl) dsp_lock &obj_hash[i].lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock irq_context: softirq (&dsp_spl_tl) dsp_lock &base->lock &obj_hash[i].lock irq_context: 0 intf_mutex irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 iscsi_transport_lock irq_context: 0 subsys mutex#75 irq_context: 0 subsys mutex#75 &k->k_lock irq_context: 0 &tx_task->waiting irq_context: 0 link_ops_rwsem irq_context: 0 gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 subsys mutex#76 irq_context: 0 subsys mutex#76 &k->k_lock irq_context: 0 service_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &cfs_rq->removed.lock irq_context: 0 reading_mutex &x->wait#12 irq_context: 0 vsock_register_mutex irq_context: 0 comedi_drivers_list_lock irq_context: 0 subsys mutex#77 irq_context: 0 subsys mutex#77 &k->k_lock irq_context: 0 snd_ctl_layer_rwsem irq_context: 0 snd_card_mutex irq_context: 0 snd_ioctl_rwsem irq_context: 0 strings irq_context: 0 strings fs_reclaim irq_context: 0 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 strings pool_lock#2 irq_context: 0 register_mutex irq_context: 0 sound_mutex irq_context: 0 sound_mutex fs_reclaim irq_context: 0 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sound_mutex pool_lock#2 irq_context: 0 sound_mutex &k->list_lock irq_context: 0 sound_mutex gdp_mutex irq_context: 0 sound_mutex gdp_mutex &k->list_lock irq_context: 0 sound_mutex lock irq_context: 0 sound_mutex lock kernfs_idr_lock irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sound_mutex bus_type_sem irq_context: 0 sound_mutex sysfs_symlink_target_lock irq_context: 0 sound_mutex &c->lock irq_context: 0 sound_mutex &pcp->lock &zone->lock irq_context: 0 sound_mutex &zone->lock irq_context: 0 sound_mutex &____s->seqcount irq_context: 0 sound_mutex &root->kernfs_rwsem irq_context: 0 sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sound_mutex &dev->power.lock irq_context: 0 sound_mutex dpm_list_mtx irq_context: 0 sound_mutex req_lock irq_context: 0 sound_mutex &p->pi_lock irq_context: 0 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sound_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sound_mutex &rq->__lock irq_context: 0 sound_mutex &cfs_rq->removed.lock irq_context: 0 sound_mutex &obj_hash[i].lock irq_context: 0 sound_mutex &x->wait#11 irq_context: 0 sound_mutex uevent_sock_mutex irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sound_mutex running_helpers_waitq.lock irq_context: 0 sound_mutex subsys mutex#77 irq_context: 0 sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 info_mutex &pcp->lock &zone->lock irq_context: 0 info_mutex &zone->lock irq_context: 0 register_mutex#2 irq_context: 0 register_mutex#3 irq_context: 0 register_mutex#3 fs_reclaim irq_context: 0 register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex irq_context: 0 register_mutex#3 sound_mutex fs_reclaim irq_context: 0 register_mutex#3 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#3 sound_mutex &c->lock irq_context: 0 register_mutex#3 sound_mutex &pcp->lock &zone->lock irq_context: 0 register_mutex#3 sound_mutex &zone->lock irq_context: 0 register_mutex#3 sound_mutex &____s->seqcount irq_context: 0 register_mutex#3 sound_mutex pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex gdp_mutex irq_context: 0 register_mutex#3 sound_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#3 sound_mutex lock irq_context: 0 register_mutex#3 sound_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#3 sound_mutex bus_type_sem irq_context: 0 register_mutex#3 sound_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#3 sound_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#3 sound_mutex &dev->power.lock irq_context: 0 register_mutex#3 sound_mutex dpm_list_mtx irq_context: 0 register_mutex#3 sound_mutex req_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#3 sound_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 register_mutex#3 sound_mutex &rq->__lock irq_context: 0 register_mutex#3 sound_mutex &x->wait#11 irq_context: 0 register_mutex#3 sound_mutex &obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex uevent_sock_mutex irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#3 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#3 sound_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#3 sound_mutex subsys mutex#77 irq_context: 0 register_mutex#3 sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 register_mutex#3 clients_lock irq_context: 0 &client->ports_mutex irq_context: 0 &client->ports_mutex &client->ports_lock irq_context: 0 
register_mutex#4 irq_context: 0 register_mutex#4 fs_reclaim irq_context: 0 register_mutex#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim irq_context: 0 register_mutex#4 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 register_mutex#4 sound_oss_mutex pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex sound_loader_lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#9 irq_context: 0 register_mutex#4 sound_oss_mutex &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex irq_context: 0 register_mutex#4 sound_oss_mutex gdp_mutex &k->list_lock irq_context: 0 register_mutex#4 sound_oss_mutex lock irq_context: 0 register_mutex#4 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex bus_type_sem irq_context: 0 register_mutex#4 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 register_mutex#4 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 register_mutex#4 sound_oss_mutex &dev->power.lock irq_context: 0 register_mutex#4 sound_oss_mutex dpm_list_mtx irq_context: 0 register_mutex#4 sound_oss_mutex req_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 register_mutex#4 sound_oss_mutex &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex &x->wait#11 irq_context: 0 register_mutex#4 sound_oss_mutex &c->lock irq_context: 0 register_mutex#4 sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 register_mutex#4 sound_oss_mutex &zone->lock irq_context: 0 register_mutex#4 sound_oss_mutex &____s->seqcount irq_context: 0 register_mutex#4 sound_oss_mutex uevent_sock_mutex irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 register_mutex#4 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#77 irq_context: 0 register_mutex#4 sound_oss_mutex subsys mutex#77 &k->k_lock irq_context: 0 register_mutex#4 sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 register_mutex#4 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clients_lock irq_context: 0 &client->ports_lock irq_context: 0 &grp->list_mutex/1 irq_context: 0 &grp->list_mutex#2 irq_context: 0 &grp->list_mutex#2 &grp->list_lock irq_context: 0 &grp->list_mutex/1 clients_lock irq_context: 0 &grp->list_mutex/1 &client->ports_lock irq_context: 0 (wq_completion)events async_lookup_work irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim irq_context: 0 (wq_completion)events async_lookup_work fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)events async_lookup_work pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work clients_lock irq_context: 0 (wq_completion)events async_lookup_work &client->ports_lock irq_context: 0 (wq_completion)events async_lookup_work snd_card_mutex irq_context: 0 (wq_completion)events async_lookup_work (kmod_concurrent_max).lock irq_context: 0 (wq_completion)events async_lookup_work &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work &x->wait#17 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &grp->list_mutex/1 register_lock#3 irq_context: 0 &grp->list_mutex/1 fs_reclaim irq_context: 0 &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &grp->list_mutex/1 pool_lock#2 irq_context: 0 &dev->mutex snd_card_mutex irq_context: 0 &dev->mutex &entry->access irq_context: 0 &dev->mutex info_mutex irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex info_mutex fs_reclaim irq_context: 0 &dev->mutex info_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex info_mutex pool_lock#2 irq_context: 0 &dev->mutex info_mutex proc_inum_ida.xa_lock irq_context: 0 &dev->mutex info_mutex proc_subdir_lock irq_context: 0 &dev->mutex &card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#12 irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#12 pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem &card->ctl_files_rwlock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#12 &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#12 &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#12 &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#12 &____s->seqcount irq_context: 0 &dev->mutex subsys mutex#77 irq_context: 0 &dev->mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 
0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work running_helpers_waitq.lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events async_lookup_work rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events async_lookup_work autoload_work irq_context: 0 (wq_completion)events async_lookup_work &x->wait#10 irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events async_lookup_work &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events autoload_work irq_context: 0 (wq_completion)events autoload_work &k->list_lock irq_context: 0 (wq_completion)events autoload_work &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &x->wait#11 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#77 irq_context: 0 &dev->mutex register_mutex#2 sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock 
irq_context: 0 &dev->mutex register_mutex#2 &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 register_mutex irq_context: 0 &dev->mutex register_mutex#2 &c->lock irq_context: 0 &dev->mutex register_mutex#2 &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 &zone->lock irq_context: 0 &dev->mutex register_mutex#2 &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &c->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex req_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#77 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 
sound_oss_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 strings irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim irq_context: 0 &dev->mutex register_mutex#2 strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#2 strings pool_lock#2 irq_context: 0 &dev->mutex register_mutex#2 &entry->access irq_context: 0 &dev->mutex register_mutex#2 info_mutex irq_context: 0 &dev->mutex sound_mutex irq_context: 0 &dev->mutex sound_mutex fs_reclaim irq_context: 0 &dev->mutex sound_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_mutex &k->list_lock irq_context: 0 &dev->mutex sound_mutex lock irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_mutex bus_type_sem irq_context: 0 &dev->mutex sound_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_mutex req_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex &rq->__lock irq_context: 0 &dev->mutex sound_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_mutex &k->k_lock irq_context: 0 &dev->mutex sound_mutex subsys mutex#77 irq_context: 0 &dev->mutex sound_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem snd_ctl_led_mutex irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &x->wait#9 irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &obj_hash[i].lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->list_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &pcp->lock &zone->lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &zone->lock irq_context: 0 &dev->mutex 
&card->controls_rwsem snd_ctl_layer_rwsem &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem lock kernfs_idr_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem bus_type_sem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &root->kernfs_rwsem irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &dev->power.lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem dpm_list_mtx irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &k->k_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem sysfs_symlink_target_lock irq_context: 0 &dev->mutex info_mutex &____s->seqcount irq_context: 0 &dev->mutex info_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex info_mutex &zone->lock irq_context: 0 &dev->mutex info_mutex rcu_read_lock pool_lock#2 irq_context: 0 &dev->mutex info_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex info_mutex &c->lock irq_context: 0 &dev->mutex sound_oss_mutex irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim irq_context: 0 &dev->mutex sound_oss_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex sound_oss_mutex pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex sound_loader_lock irq_context: 0 &dev->mutex sound_oss_mutex &x->wait#9 irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex &k->list_lock irq_context: 0 &dev->mutex sound_oss_mutex lock irq_context: 0 &dev->mutex sound_oss_mutex lock kernfs_idr_lock irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &dev->mutex sound_oss_mutex bus_type_sem irq_context: 0 &dev->mutex sound_oss_mutex sysfs_symlink_target_lock irq_context: 0 &dev->mutex sound_oss_mutex &root->kernfs_rwsem irq_context: 0 &dev->mutex sound_oss_mutex &c->lock irq_context: 0 &dev->mutex sound_oss_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_oss_mutex &dev->power.lock irq_context: 0 &dev->mutex sound_oss_mutex dpm_list_mtx irq_context: 0 &dev->mutex sound_oss_mutex req_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex &x->wait#11 irq_context: 0 &dev->mutex sound_oss_mutex uevent_sock_mutex irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &dev->mutex sound_oss_mutex running_helpers_waitq.lock irq_context: 0 &dev->mutex sound_oss_mutex &k->k_lock irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#77 irq_context: 0 &dev->mutex sound_oss_mutex subsys mutex#77 &k->k_lock irq_context: 0 &dev->mutex strings irq_context: 0 &dev->mutex strings fs_reclaim irq_context: 0 
&dev->mutex strings fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex strings pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim irq_context: 0 &dev->mutex &card->controls_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &card->controls_rwsem pool_lock#2 irq_context: 0 &dev->mutex &card->controls_rwsem &c->lock irq_context: 0 &dev->mutex &card->controls_rwsem &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem &xa->xa_lock#12 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex uevent_sock_mutex &rq->__lock irq_context: 0 &dev->mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex register_mutex#2 sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 sound_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &dev->mutex register_mutex#2 sound_oss_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#2 &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex sound_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &dev->mutex sound_mutex &c->lock irq_context: 0 &dev->mutex sound_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_mutex &zone->lock irq_context: 0 &dev->mutex sound_mutex &____s->seqcount irq_context: 0 &dev->mutex sound_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex &pcp->lock &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex &zone->lock irq_context: 0 &dev->mutex sound_oss_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &dev->mutex sound_oss_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex register_mutex#5 irq_context: 0 &dev->mutex register_mutex#3 irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim irq_context: 0 &dev->mutex register_mutex#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex register_mutex#3 pool_lock#2 irq_context: 0 &dev->mutex register_mutex#3 clients_lock irq_context: 0 &dev->mutex clients_lock irq_context: 0 &dev->mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 irq_context: 0 &dev->mutex &grp->list_mutex/1 clients_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &client->ports_lock irq_context: 0 &dev->mutex &client->ports_mutex irq_context: 0 &dev->mutex &client->ports_mutex &client->ports_lock irq_context: 0 &dev->mutex &grp->list_mutex/1 register_lock#3 irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim irq_context: 0 &dev->mutex &grp->list_mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->mutex &grp->list_mutex/1 pool_lock#2 irq_context: 0 &dev->mutex sound_oss_mutex &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex register_mutex#3 &c->lock irq_context: 0 &dev->mutex register_mutex#3 &pcp->lock 
&zone->lock irq_context: 0 &dev->mutex register_mutex#3 &zone->lock irq_context: 0 &dev->mutex register_mutex#3 &____s->seqcount irq_context: 0 &dev->mutex &grp->list_mutex/1 &c->lock irq_context: 0 &dev->mutex &grp->list_mutex/1 &____s->seqcount irq_context: 0 &dev->mutex sound_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &dev->mutex &card->controls_rwsem snd_ctl_layer_rwsem rcu_read_lock pool_lock#2 irq_context: softirq drivers/block/floppy.c:640 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &obj_hash[i].lock pool_lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq drivers/block/floppy.c:640 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock irq_context: softirq &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 nf_conntrack_expect_lock irq_context: 0 net_rwsem irq_context: softirq rcu_callback rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &q->blkcg_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (console_sem).lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &irq_desc_lock_class irq_resend_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_subdir_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) &desc->request_mutex pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_owner_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) console_owner irq_context: 0 llc_sap_list_lock irq_context: 0 llc_sap_list_lock &c->lock irq_context: 0 llc_sap_list_lock &pcp->lock &zone->lock irq_context: 0 llc_sap_list_lock &zone->lock irq_context: 0 llc_sap_list_lock &____s->seqcount irq_context: 0 llc_sap_list_lock pool_lock#2 irq_context: 0 act_id_mutex irq_context: 0 act_id_mutex fs_reclaim irq_context: 0 act_id_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 act_id_mutex pool_lock#2 irq_context: 0 act_mod_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) sysfs_symlink_target_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) kernfs_idr_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &x->wait#10 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &xa->xa_lock#10 irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) pcpu_lock irq_context: 0 &pool->lock/1 &x->wait#10 irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 ife_mod_lock irq_context: 0 
(wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 act_id_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&motor_off_timer[drive]) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->debugfs_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->unused_hctx_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->queue_lock &blkcg->lock percpu_ref_switch_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) (work_completion)(&td->dispatch_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock (&sq->pending_timer) irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock percpu_counters_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 
(wq_completion)events_unbound (work_completion)(&entry->work) pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &fsnotify_mark_srcu irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 rtnl_mutex uevent_sock_mutex &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &q->queue_lock &blkcg->lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&entry->work) &q->blkcg_mutex &rq->__lock irq_context: 0 cls_mod_lock irq_context: 0 ematch_mod_lock irq_context: 0 sock_diag_table_mutex irq_context: 0 nfnl_subsys_acct irq_context: 0 nfnl_subsys_queue irq_context: 0 nfnl_subsys_ulog irq_context: 0 nf_log_mutex irq_context: 0 nfnl_subsys_osf irq_context: 0 nf_sockopt_mutex irq_context: 0 nfnl_subsys_ctnetlink irq_context: 0 nfnl_subsys_ctnetlink_exp irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 nfnl_subsys_cttimeout irq_context: 0 nfnl_subsys_cthelper irq_context: 0 nf_ct_helper_mutex irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) blk_queue_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events (work_completion)(&blkg->free_work) &obj_hash[i].lock pool_lock irq_context: 0 nf_conntrack_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 nf_ct_nat_helpers_mutex irq_context: 0 nfnl_subsys_nftables irq_context: 0 nfnl_subsys_nftcompat irq_context: 0 masq_mutex irq_context: 0 masq_mutex pernet_ops_rwsem irq_context: 0 masq_mutex pernet_ops_rwsem rtnl_mutex irq_context: 0 masq_mutex (inetaddr_chain).rwsem irq_context: 0 masq_mutex inet6addr_chain.lock irq_context: 0 &xt[i].mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 subsys mutex#78 irq_context: 0 subsys mutex#78 &k->k_lock irq_context: 0 nfnl_subsys_ipset irq_context: 0 ip_set_type_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 ip_vs_sched_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &c->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &zone->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 ip_vs_pe_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 tunnel4_mutex irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock &c->lock irq_context: 0 pernet_ops_rwsem net_generic_ids.xa_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem 
net_generic_ids.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 xfrm4_protocol_mutex irq_context: 0 &xt[i].mutex fs_reclaim irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xt[i].mutex pool_lock#2 irq_context: 0 inet_diag_table_mutex irq_context: 0 xfrm_km_lock irq_context: 0 xfrm_translator_lock irq_context: 0 xfrm6_protocol_mutex irq_context: 0 tunnel6_mutex irq_context: 0 xfrm_if_cb_lock irq_context: 0 inetsw6_lock irq_context: 0 &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (crypto_chain).rwsem irq_context: 0 (crypto_chain).rwsem fs_reclaim irq_context: 0 (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (crypto_chain).rwsem pool_lock#2 irq_context: 0 (crypto_chain).rwsem &c->lock irq_context: 0 (crypto_chain).rwsem &pcp->lock &zone->lock irq_context: 0 (crypto_chain).rwsem &zone->lock irq_context: 0 (crypto_chain).rwsem &____s->seqcount irq_context: 0 (crypto_chain).rwsem kthread_create_lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock irq_context: 0 (crypto_chain).rwsem 
&x->wait irq_context: 0 (crypto_chain).rwsem &rq->__lock irq_context: 0 (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#21 irq_context: 0 &x->wait#21 &p->pi_lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#21 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->alloc_lock &x->wait irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 stp_proto_mutex irq_context: 0 stp_proto_mutex llc_sap_list_lock irq_context: 0 stp_proto_mutex llc_sap_list_lock pool_lock#2 irq_context: 0 switchdev_notif_chain.lock irq_context: 0 (switchdev_blocking_notif_chain).rwsem irq_context: 0 br_ioctl_mutex irq_context: 0 nf_ct_proto_mutex irq_context: 0 ebt_mutex irq_context: 0 ebt_mutex fs_reclaim irq_context: 0 ebt_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ebt_mutex pool_lock#2 irq_context: 0 dsa_tag_drivers_lock irq_context: 0 rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex quarantine_lock irq_context: 0 protocol_list_lock irq_context: 0 linkfail_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rose_neigh_list_lock irq_context: 0 proto_tab_lock#2 irq_context: 0 bt_proto_lock irq_context: 0 bt_proto_lock pool_lock#2 irq_context: 0 bt_proto_lock &dir->lock irq_context: 0 bt_proto_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock chan_list_lock irq_context: 0 bt_proto_lock l2cap_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP chan_list_lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 rfcomm_wq.lock irq_context: 0 
rfcomm_mutex irq_context: 0 auth_domain_lock irq_context: 0 registered_mechs_lock irq_context: 0 cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 atm_dev_notify_chain.lock irq_context: 0 genl_mutex irq_context: 0 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 proto_tab_lock#3 irq_context: 0 vlan_ioctl_mutex irq_context: 0 pernet_ops_rwsem (console_sem).lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner_lock irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 pernet_ops_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rds_info_lock irq_context: 0 rds_trans_sem irq_context: 0 rds_trans_sem (console_sem).lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner_lock irq_context: 0 rds_trans_sem console_lock console_srcu console_owner irq_context: 0 rds_trans_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 rds_trans_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &id_priv->lock irq_context: 0 lock#7 irq_context: 0 lock#7 fs_reclaim irq_context: 0 lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 lock#7 pool_lock#2 irq_context: 0 lock#7 &xa->xa_lock#13 irq_context: 0 lock#7 &xa->xa_lock#13 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &zone->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem 
cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 smc_wr_rx_hash_lock irq_context: 0 v9fs_trans_lock irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 &x->wait#17 &p->pi_lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &x->wait#17 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 lowpan_nhc_lock irq_context: 0 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 ovs_mutex irq_context: 0 pernet_ops_rwsem once_lock irq_context: 0 pernet_ops_rwsem once_lock crngs.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem 
nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->list_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &k->k_lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &dev->mutex &dev->power.lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up subsys mutex#79 irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock irq_context: 0 cpu_hotplug_lock cpuhp_state-up &base->lock &obj_hash[i].lock irq_context: 0 cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) kfence_freelist_lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 kernfs_idr_lock &obj_hash[i].lock irq_context: 0 kernfs_idr_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &fs->lock &dentry->d_lock irq_context: 0 &root->kernfs_rwsem quarantine_lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &root->kernfs_rwsem &meta->lock irq_context: 0 &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock irq_context: 0 (wq_completion)events 
drain_vmap_work vmap_purge_lock purge_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock quarantine_lock irq_context: softirq &(&gc_work->dwork)->timer irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&gc_work->dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&ipvs->defense_work)->timer irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &s->s_inode_list_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->dropentry_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->droppacket_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &ipvs->securetcp_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 &root->kernfs_rwsem &base->lock irq_context: 0 &root->kernfs_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock pool_lock#2 irq_context: 0 lock map_idr_lock irq_context: 0 lock map_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 purge_vmap_area_lock irq_context: 0 lock prog_idr_lock irq_context: 0 lock prog_idr_lock pool_lock#2 irq_context: 0 bpf_lock irq_context: 0 rcu_read_lock_trace fs_reclaim irq_context: 0 rcu_read_lock_trace fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rcu_read_lock_trace pool_lock#2 irq_context: 0 rcu_read_lock_trace &obj_hash[i].lock irq_context: 0 rcu_read_lock_trace &c->lock irq_context: 0 rcu_read_lock_trace &pcp->lock &zone->lock irq_context: 0 rcu_read_lock_trace &zone->lock irq_context: 0 rcu_read_lock_trace &____s->seqcount irq_context: 0 rcu_read_lock_trace lock irq_context: 0 
rcu_read_lock_trace lock btf_idr_lock irq_context: 0 rcu_read_lock_trace lock btf_idr_lock pool_lock#2 irq_context: 0 rcu_read_lock_trace &newf->file_lock irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 irq_context: 0 rcu_read_lock_trace &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 rcu_read_lock_trace lock map_idr_lock irq_context: 0 rcu_read_lock_trace &map->freeze_mutex irq_context: 0 key_types_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem asymmetric_key_parsers_sem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem &c->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &____s->seqcount irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem crypto_alg_sem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &c->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &pcp->lock &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &zone->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &____s->seqcount irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem pool_lock#2 irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem kthread_create_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &x->wait irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &x->wait#21 irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &base->lock &obj_hash[i].lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock irq_context: 0 key_types_sem asymmetric_key_parsers_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key_types_sem asymmetric_key_parsers_sem (&timer.timer) irq_context: 0 key_types_sem asymmetric_key_parsers_sem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &meta->lock irq_context: 0 
(wq_completion)events (work_completion)(&p->wq) kfence_freelist_lock irq_context: 0 key_types_sem &type->lock_class irq_context: 0 key_types_sem &type->lock_class fs_reclaim irq_context: 0 key_types_sem &type->lock_class fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 key_types_sem &type->lock_class pool_lock#2 irq_context: 0 key_types_sem &type->lock_class &c->lock irq_context: 0 key_types_sem &type->lock_class &pcp->lock &zone->lock irq_context: 0 key_types_sem &type->lock_class &zone->lock irq_context: 0 key_types_sem &type->lock_class &____s->seqcount irq_context: 0 key_types_sem &type->lock_class key_user_lock irq_context: 0 key_types_sem &type->lock_class crngs.lock irq_context: 0 key_types_sem &type->lock_class key_serial_lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex irq_context: 0 key_types_sem &type->lock_class key_construction_mutex &obj_hash[i].lock irq_context: 0 key_types_sem &type->lock_class key_construction_mutex pool_lock#2 irq_context: 0 key_types_sem &type->lock_class ima_keys_lock irq_context: 0 key_types_sem &obj_hash[i].lock irq_context: 0 key_types_sem pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex crypto_alg_sem irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock pool_lock#2 irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock free_vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock vmap_area_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &____s->seqcount irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock init_mm.page_table_lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &pcp->lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &zone->lock irq_context: 0 cpu_hotplug_lock cpuhp_state_mutex scomp_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 slab_mutex lock irq_context: 0 slab_mutex lock kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem irq_context: 0 slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 slab_mutex &k->list_lock irq_context: 0 slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 slab_mutex lock kernfs_idr_lock &c->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 slab_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&net->can.stattimer) irq_context: softirq (&net->can.stattimer) &obj_hash[i].lock irq_context: softirq (&net->can.stattimer) &base->lock irq_context: softirq (&net->can.stattimer) &base->lock &obj_hash[i].lock irq_context: 0 slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &zone->lock irq_context: 0 slab_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (debug_obj_work).work pool_lock#2 irq_context: 0 slab_mutex fs_reclaim &rq->__lock irq_context: softirq (&vblank->disable_timer) irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock 
&dev->vblank_time_lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock irq_context: softirq (&vblank->disable_timer) &dev->vbl_lock &dev->vblank_time_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 slab_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 slab_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 slab_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &base->lock &obj_hash[i].lock irq_context: 0 pcpu_drain_mutex &pcp->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock irq_context: 0 pcpu_drain_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&q->timeout) irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) irq_context: 0 &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 key_types_sem &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 
pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 (wq_completion)events netstamp_work irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events netstamp_work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 &x->wait#22 irq_context: 0 &x->wait#22 &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 bio_slab_lock slab_mutex &root->kernfs_rwsem irq_context: 0 bio_slab_lock slab_mutex &k->list_lock irq_context: 0 bio_slab_lock slab_mutex &pcp->lock &zone->lock irq_context: 0 bio_slab_lock slab_mutex &zone->lock 
irq_context: 0 bio_slab_lock slab_mutex &____s->seqcount irq_context: 0 bio_slab_lock slab_mutex lock irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock irq_context: 0 bio_slab_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 bio_slab_lock slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: softirq (&rxnet->peer_keepalive_timer) irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 init_user_ns.keyring_sem irq_context: 0 init_user_ns.keyring_sem key_user_lock irq_context: 0 init_user_ns.keyring_sem root_key_user.lock irq_context: 0 init_user_ns.keyring_sem fs_reclaim irq_context: 0 init_user_ns.keyring_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 init_user_ns.keyring_sem pool_lock#2 irq_context: 0 init_user_ns.keyring_sem crngs.lock irq_context: 0 init_user_ns.keyring_sem key_serial_lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex irq_context: 0 init_user_ns.keyring_sem &type->lock_class irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krxrpcd irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock pool_lock#2 irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock root_key_user.lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex keyring_name_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &rxnet->peer_hash_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock key_construction_mutex pool_lock#2 irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem keyring_serialise_link_lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->peer_keepalive_work) &base->lock &obj_hash[i].lock irq_context: 0 init_user_ns.keyring_sem key_construction_mutex keyring_name_lock irq_context: 0 init_user_ns.keyring_sem &type->lock_class keyring_serialise_link_lock &obj_hash[i].lock irq_context: 0 template_list irq_context: 0 idr_lock irq_context: 0 ima_extend_list_mutex irq_context: 0 ima_extend_list_mutex fs_reclaim irq_context: 0 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ima_extend_list_mutex pool_lock#2 irq_context: 0 pci_bus_sem irq_context: 0 clk_debug_lock irq_context: 0 (wq_completion)events_unbound deferred_probe_work irq_context: 0 deferred_probe_work irq_context: 0 (wq_completion)events_unbound deferred_probe_work deferred_probe_mutex irq_context: 0 
&x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 dpm_list_mtx (console_sem).lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner_lock irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 dpm_list_mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 console_mutex &root->kernfs_rwsem irq_context: 0 console_mutex kernfs_notify_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 console_mutex kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 console_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 k-sk_lock-AF_INET irq_context: 0 k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-slock-AF_INET irq_context: 0 k-sk_lock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 k-slock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 reg_requests_lock irq_context: 0 (wq_completion)events reg_work irq_context: 0 (wq_completion)events reg_work rtnl_mutex irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events reg_work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events reg_work rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) pool_lock#2 irq_context: 0 
(wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &fw_cache.lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) async_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock init_fs.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) init_task.alloc_lock init_fs.lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &____s->seqcount#4 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &sb->s_type->i_mutex_key &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &____s->seqcount irq_context: 0 detector_work irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner irq_context: 0 &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 &wq->mutex &x->wait#10 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 
acpi_gpio_deferred_req_irqs_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) console_owner irq_context: softirq fs/file_table.c:431 irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq fs/file_table.c:431 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (delayed_fput_work).work irq_context: 0 (wq_completion)events (delayed_fput_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (delayed_fput_work).work pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#9 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)events 
(work_completion)(&fw_work->work) umhelper_sem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem bus_type_sem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem sysfs_symlink_target_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &dev->power.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dpm_list_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#80 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#80 &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem running_helpers_waitq.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &x->wait#23 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#2 irq_context: 0 
tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 tomoyo_ss &obj_hash[i].lock irq_context: 0 tomoyo_ss &c->lock irq_context: 0 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 tomoyo_ss &____s->seqcount irq_context: 0 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 tomoyo_ss tomoyo_log_lock irq_context: 0 tomoyo_ss tomoyo_log_wait.lock irq_context: 0 cdev_lock irq_context: 0 tty_mutex (console_sem).lock irq_context: 0 tty_mutex console_lock irq_context: 0 tty_mutex fs_reclaim irq_context: 0 tty_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex pool_lock#2 irq_context: 0 tty_mutex tty_ldiscs_lock irq_context: 0 tty_mutex &obj_hash[i].lock irq_context: 0 tty_mutex &k->list_lock irq_context: 0 tty_mutex &k->k_lock irq_context: 0 tty_mutex &tty->legacy_mutex irq_context: 0 tty_mutex &tty->legacy_mutex &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem pool_lock#2 irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &c->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &____s->seqcount irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock &obj_hash[i].lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem free_vmap_area_lock pool_lock#2 irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem vmap_area_lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &pcp->lock &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &zone->lock irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->write_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->read_wait irq_context: 0 tty_mutex &tty->legacy_mutex &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->legacy_mutex irq_context: 0 &tty->legacy_mutex &tty->files_lock irq_context: 0 &tty->legacy_mutex &port->lock irq_context: 0 &tty->legacy_mutex &port->mutex irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex &____s->seqcount irq_context: 0 &tty->legacy_mutex &port->mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &port_lock_key irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->legacy_mutex &port->mutex hash_mutex pool_lock#2 irq_context: 0 &tty->legacy_mutex &port->mutex &i->lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex 
&desc->request_mutex &irq_desc_lock_class mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock vector_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class mask_lock tmp_mask_lock ioapic_lock irq_context: 0 &tty->legacy_mutex &port->mutex &desc->request_mutex &irq_desc_lock_class ioapic_lock i8259A_lock irq_context: 0 &tty->legacy_mutex &port->mutex register_lock irq_context: 0 &tty->legacy_mutex &port->mutex &irq_desc_lock_class irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: 0 &tty->legacy_mutex &port->mutex &c->lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_inum_ida.xa_lock irq_context: 0 &tty->legacy_mutex &port->mutex proc_subdir_lock irq_context: hardirq &i->lock irq_context: 0 &tty->legacy_mutex &port_lock_key irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#26/1 &rq->__lock irq_context: 0 &type->s_umount_key#26/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#26/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#26/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 lock#5 irq_context: 0 &type->s_umount_key#26/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#26/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#26/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#26/1 lock#3 irq_context: 0 &type->s_umount_key#26/1 lock#3 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#26/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#26/1 lock#3 &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#5 irq_context: 0 &type->s_umount_key#26/1 lock#3 (work_completion)(work) irq_context: 0 &type->s_umount_key#26/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#26/1 sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 
list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#27/1 sb_lock irq_context: 0 &type->s_umount_key#27/1 irq_context: 0 &type->s_umount_key#27/1 fs_reclaim irq_context: 0 &type->s_umount_key#27/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#27/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#27/1 &zone->lock irq_context: 0 &type->s_umount_key#27/1 &____s->seqcount irq_context: 0 &type->s_umount_key#27/1 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#27/1 lock#4 irq_context: 0 &type->s_umount_key#27/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#27/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#27/1 &dd->lock irq_context: 0 &type->s_umount_key#27/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#27/1 &rq->__lock irq_context: 0 &type->s_umount_key#27/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#27/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#27/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#27/1 lock#5 irq_context: 0 &type->s_umount_key#27/1 &lruvec->lru_lock irq_context: softirq bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#27/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#27/1 lock#3 irq_context: 0 &type->s_umount_key#27/1 lock#3 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 lock#3 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#27/1 lock#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#27/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#27/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#27/1 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#27/1 lock#3 &rq->__lock irq_context: 0 &type->s_umount_key#27/1 lock#3 (work_completion)(work) irq_context: 0 &type->s_umount_key#27/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#27/1 sb_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 fs_reclaim irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 pcpu_alloc_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 shrinker_rwsem irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 list_lrus_mutex irq_context: 0 &bdev->bd_fsfreeze_mutex &type->s_umount_key#28/1 sb_lock irq_context: 0 &type->s_umount_key#28/1 irq_context: 0 &type->s_umount_key#28/1 fs_reclaim irq_context: 0 &type->s_umount_key#28/1 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &c->lock irq_context: 0 &type->s_umount_key#28/1 &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#28/1 &zone->lock irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#28/1 lock#4 irq_context: 0 &type->s_umount_key#28/1 &mapping->private_lock irq_context: 0 &type->s_umount_key#28/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 &dd->lock irq_context: 0 &type->s_umount_key#28/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#28/1 bit_wait_table + i irq_context: 0 &type->s_umount_key#28/1 &rq->__lock irq_context: 0 &type->s_umount_key#28/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#3 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#3 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#28/1 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#28/1 lock#5 irq_context: 0 &type->s_umount_key#28/1 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#28/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 crypto_alg_sem irq_context: 0 &type->s_umount_key#28/1 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#28/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 percpu_counters_lock irq_context: 0 &type->s_umount_key#28/1 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#28/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#28/1 inode_hash_lock irq_context: 0 &type->s_umount_key#28/1 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#28/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &dd->lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#28/1 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &rsp->gp_wait irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&s->destroy_work) &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 
&type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#28/1 proc_inum_ida.xa_lock irq_context: 0 &type->s_umount_key#28/1 proc_subdir_lock irq_context: 0 &type->s_umount_key#28/1 &journal->j_state_lock irq_context: 0 &type->s_umount_key#28/1 kthread_create_lock irq_context: 0 &type->s_umount_key#28/1 &p->pi_lock irq_context: 0 &type->s_umount_key#28/1 &x->wait irq_context: 0 &type->s_umount_key#28/1 &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#28/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#28/1 &journal->j_wait_done_commit irq_context: 0 &journal->j_wait_done_commit irq_context: 0 &journal->j_wait_done_commit &p->pi_lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#28/1 &journal->j_state_lock irq_context: 0 &type->s_umount_key#28/1 &p->alloc_lock irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#28/1 cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit irq_context: 0 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &type->s_umount_key#28/1 wq_pool_mutex irq_context: 0 &type->s_umount_key#28/1 wq_pool_mutex &wq->mutex irq_context: 0 &type->s_umount_key#28/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->s_umount_key#28/1 &ei->i_es_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &c->lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &____s->seqcount irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 
ext4_grpinfo_slab_create_mutex slab_mutex &n->list_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &k->list_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#28/1 ext4_grpinfo_slab_create_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#28/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#28/1 ext4_li_mtx irq_context: 0 &type->s_umount_key#28/1 lock irq_context: 0 &type->s_umount_key#28/1 lock kernfs_idr_lock irq_context: 0 &type->s_umount_key#28/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#28/1 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &type->s_umount_key#28/1 lock kernfs_idr_lock pool_lock#2 irq_context: 0 &type->s_umount_key#28/1 (console_sem).lock irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner_lock irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &type->s_umount_key#28/1 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#28/1 &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#8 irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 
&type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#3 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 tomoyo_ss quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key namespace_sem &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock &obj_hash[i].lock irq_context: 0 vmap_purge_lock free_vmap_area_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock rcu_read_lock pool_lock#2 irq_context: 0 vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &obj_hash[i].lock irq_context: 0 vmap_purge_lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (init_mm).mmap_lock irq_context: 0 (init_mm).mmap_lock &rq->__lock irq_context: 0 
&type->s_umount_key#29/1 irq_context: 0 &type->s_umount_key#29/1 fs_reclaim irq_context: 0 &type->s_umount_key#29/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 pool_lock#2 irq_context: 0 &type->s_umount_key#29/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#29/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#29/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#29/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#29/1 &c->lock irq_context: 0 &type->s_umount_key#29/1 &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#29/1 &zone->lock irq_context: 0 &type->s_umount_key#29/1 &____s->seqcount irq_context: 0 &type->s_umount_key#29/1 sb_lock irq_context: 0 &type->s_umount_key#29/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#29/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#29/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#29/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#29/1 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &type->s_umount_key#29/1 &dentry->d_lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 irq_context: 0 sb_writers#3 mount_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 
sb_writers#3 &sb->s_type->i_mutex_key#9 &zone->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 sysctl_lock irq_context: 0 sb_writers#3 fs_reclaim irq_context: 0 sb_writers#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 pool_lock#2 irq_context: 0 sb_writers#3 &obj_hash[i].lock irq_context: 0 sb_writers#3 &h->resize_lock irq_context: 0 sb_writers#3 &h->resize_lock free_hpage_work irq_context: 0 sb_writers#3 &h->resize_lock hugetlb_lock irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim irq_context: 0 sb_writers#3 &h->resize_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#3 &h->resize_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 &h->resize_lock &____s->seqcount irq_context: 0 sb_writers#3 &h->resize_lock pool_lock#2 irq_context: 0 sb_writers#3 hugetlb_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 &sig->cred_guard_mutex init_fs.lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 
&sig->cred_guard_mutex &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#8 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#8 &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#8 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#8 &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#8 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem 
&ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 irq_context: 0 &sig->cred_guard_mutex aa_buffers_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#8 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &zone->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim irq_context: 0 &sig->cred_guard_mutex tomoyo_ss fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock init_fs.seq.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_log_wait.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: softirq &(&ovs_net->masks_rebalance)->timer irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &meta->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &zone->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 
&sig->cred_guard_mutex &iint->mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 &sig->cred_guard_mutex binfmt_lock irq_context: 0 &sig->cred_guard_mutex entries_lock irq_context: 0 &sig->cred_guard_mutex &dentry->d_lock &lru->node[i].lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &zone->lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &sighand->siglock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &newf->file_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock irq_context: 0 batched_entropy_u16.lock irq_context: 0 batched_entropy_u16.lock crngs.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock &mm->page_table_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 
&mm->mmap_lock &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock pool_lock irq_context: hardirq &rq->__lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &iint->mutex &ei->xattr_sem irq_context: 0 &iint->mutex fs_reclaim irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &zone->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &iint->mutex &folio_wait_table[i] irq_context: 0 &iint->mutex &rq->__lock irq_context: 0 &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex &obj_hash[i].lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex &pcp->lock &zone->lock irq_context: 0 &iint->mutex &zone->lock irq_context: 0 &iint->mutex &____s->seqcount irq_context: 0 &iint->mutex rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &iint->mutex ima_extend_list_mutex irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex ima_extend_list_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 binfmt_lock irq_context: 0 &dentry->d_lock &lru->node[i].lock irq_context: 0 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#30 irq_context: 0 &type->s_umount_key#30 shrinker_rwsem irq_context: 0 &type->s_umount_key#30 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#30 &dentry->d_lock irq_context: 0 &type->s_umount_key#30 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->s_umount_key#30 rename_lock.seqcount irq_context: 0 &type->s_umount_key#30 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 
&type->s_umount_key#30 &dentry->d_lock/1 irq_context: 0 &type->s_umount_key#30 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &type->s_umount_key#30 &sb->s_type->i_lock_key#23 irq_context: 0 &type->s_umount_key#30 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#30 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#30 sysctl_lock irq_context: 0 &type->s_umount_key#30 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#30 pool_lock#2 irq_context: 0 &type->s_umount_key#30 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#30 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#30 sb_lock irq_context: 0 unnamed_dev_ida.xa_lock irq_context: 0 &xa->xa_lock#8 irq_context: 0 prog_idr_lock irq_context: 0 prog_idr_lock &obj_hash[i].lock irq_context: 0 prog_idr_lock pool_lock#2 irq_context: 0 map_idr_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 btf_idr_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 btf_idr_lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 btf_idr_lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq irq_context: 0 &vma->vm_lock->lock fs_reclaim irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock irq_context: 0 &vma->vm_lock->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &iint->mutex 
mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_tasks_trace.tasks_gp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) map_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 lock#4 &lruvec->lru_lock irq_context: 0 &type->i_mutex_dir_key#3 
rcu_read_lock &dd->lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#8 &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#8 &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem pool_lock#2 irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock kfence_freelist_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start irq_context: 0 &port->mutex irq_context: 0 &tty->ldisc_sem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &port->mutex irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->read_wait irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 integrity_iint_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 &sb->s_type->i_lock_key#23 irq_context: 0 &p->lock irq_context: 0 &p->lock fs_reclaim irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock pool_lock#2 irq_context: 0 &p->lock &mm->mmap_lock irq_context: 0 &type->s_umount_key#31/1 irq_context: 0 &type->s_umount_key#31/1 fs_reclaim irq_context: 0 &type->s_umount_key#31/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31/1 pool_lock#2 irq_context: 0 &type->s_umount_key#31/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#31/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#31/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#31/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#31/1 sb_lock irq_context: 0 &type->s_umount_key#31/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#31/1 
&sb->s_type->i_lock_key#24 irq_context: 0 &type->s_umount_key#31/1 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->s_umount_key#31/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#31/1 &dentry->d_lock irq_context: 0 &root->kernfs_iattr_rwsem irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq irq_context: 0 &ent->pde_unload_lock irq_context: 0 &p->lock &c->lock irq_context: 0 &p->lock &____s->seqcount irq_context: 0 &p->lock file_systems_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 namespace_sem mount_lock mount_lock.seqcount pool_lock#2 irq_context: 0 &type->s_umount_key#30 &rq->__lock irq_context: 0 &x->wait#25 irq_context: 0 &mm->mmap_lock resource_lock irq_context: 0 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->unx.table.locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->lock clock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &____s->seqcount#3 
irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex tomoyo_ss quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->wait_chldexit irq_context: 0 tasklist_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#3 namespace_sem &____s->seqcount irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &mm->mmap_lock &p->alloc_lock irq_context: 0 &mm->mmap_lock lock#4 irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &memcg->mm_list.lock irq_context: 0 tasklist_lock &sighand->siglock &____s->seqcount irq_context: 0 tasklist_lock &sighand->siglock pool_lock#2 irq_context: 0 tasklist_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock &____s->seqcount#5 irq_context: 0 &prev->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock irq_context: 0 &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#8 rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 irq_context: 0 sb_writers#4 mount_lock irq_context: 0 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &mapping->private_lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 sb_writers#3 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &wb->list_lock irq_context: 0 sb_writers#3 &wb->list_lock &sb->s_type->i_lock_key#23 irq_context: 0 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &pid->lock irq_context: 0 &p->alloc_lock &fs->lock 
&dentry->d_lock irq_context: 0 &p->lock namespace_sem irq_context: 0 &p->lock namespace_sem &new_ns->ns_lock irq_context: 0 &p->lock namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &p->lock namespace_sem rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#32 irq_context: 0 &type->s_umount_key#32 &lru->node[i].lock irq_context: 0 &type->s_umount_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#32 &sb->s_type->i_lock_key#22 irq_context: 0 &type->s_umount_key#32 &sb->s_type->i_lock_key#22 &lru->node[i].lock irq_context: 0 &type->s_umount_key#32 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 pool_lock#2 irq_context: 0 &type->s_umount_key#32 &obj_hash[i].lock pool_lock irq_context: 0 &type->s_umount_key#32 &journal->j_state_lock irq_context: 0 &type->s_umount_key#32 &p->alloc_lock irq_context: 0 &type->s_umount_key#32 (work_completion)(&sbi->s_error_work) irq_context: 0 &type->s_umount_key#32 &journal->j_state_lock irq_context: 0 &type->s_umount_key#32 key#3 irq_context: 0 &type->s_umount_key#32 key#4 irq_context: 0 &type->s_umount_key#32 &sbi->s_error_lock irq_context: 0 &type->s_umount_key#32 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 &c->lock irq_context: 0 &type->s_umount_key#32 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 &base->lock irq_context: 0 &type->s_umount_key#32 &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#32 &dd->lock irq_context: 0 &type->s_umount_key#32 rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#32 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#32 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 bit_wait_table + i irq_context: 0 &type->s_umount_key#32 &rq->__lock irq_context: 0 &type->s_umount_key#32 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock irq_context: softirq &fq->mq_flush_lock tk_core.seq.seqcount irq_context: softirq &fq->mq_flush_lock &q->requeue_lock irq_context: softirq &fq->mq_flush_lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &q->requeue_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd 
(work_completion)(&(&q->requeue_work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx irq_context: 0 &type->s_umount_key#32 ext4_li_mtx fs_reclaim irq_context: 0 &type->s_umount_key#32 ext4_li_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 ext4_li_mtx pool_lock#2 irq_context: 0 &type->s_umount_key#32 ext4_li_mtx batched_entropy_u16.lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &eli->li_list_mtx irq_context: 0 &type->s_umount_key#32 ext4_li_mtx kthread_create_lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &p->pi_lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &x->wait irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &rq->__lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 ext4_li_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 (console_sem).lock irq_context: 0 &type->s_umount_key#32 console_lock console_srcu console_owner_lock irq_context: 0 &type->s_umount_key#32 console_lock console_srcu console_owner irq_context: 0 &type->s_umount_key#32 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#32 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &type->s_umount_key#32 mount_lock irq_context: 0 &type->s_umount_key#32 mount_lock mount_lock.seqcount irq_context: 0 &type->s_umount_key#32 mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 namespace_sem irq_context: 0 namespace_sem &new_ns->ns_lock irq_context: 0 rcu_read_lock &pid->lock irq_context: 0 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 rename_lock.seqcount irq_context: 0 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 &pid->lock irq_context: 0 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &____s->seqcount irq_context: 0 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 &c->lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 jbd2_handle &c->lock irq_context: 0 
sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->list_lock irq_context: 0 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &wb->work_lock irq_context: 0 sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal rcu_read_lock init_fs.seq.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 sb_internal rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_internal pool_lock#2 irq_context: 0 sb_internal &journal->j_state_lock irq_context: 0 sb_internal jbd2_handle irq_context: 0 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_internal jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_internal &obj_hash[i].lock irq_context: 0 &ei->i_data_sem irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sighand->siglock hrtimer_bases.lock irq_context: 0 &sighand->siglock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &sighand->siglock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 file_rwsem irq_context: 0 file_rwsem &ctx->flc_lock irq_context: 0 file_rwsem &ctx->flc_lock &fll->lock irq_context: 0 &ctx->flc_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: softirq &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tk_core.seq.seqcount irq_context: 0 
&sig->cred_guard_mutex sb_writers#4 mount_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->list_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock batched_entropy_u64.lock crngs.lock irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle &c->lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock 
tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &iint->mutex tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mount_lock irq_context: 0 &iint->mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &iint->mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &iint->mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock irq_context: 0 &iint->mutex sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 rcu_read_lock &p->alloc_lock irq_context: 0 &type->s_umount_key#33/1 irq_context: 0 &type->s_umount_key#33/1 fs_reclaim irq_context: 0 &type->s_umount_key#33/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 pool_lock#2 irq_context: 0 &type->s_umount_key#33/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#33/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#33/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#33/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#33/1 sb_lock irq_context: 0 &type->s_umount_key#33/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#33/1 &c->lock irq_context: 0 &type->s_umount_key#33/1 &____s->seqcount irq_context: 0 &type->s_umount_key#33/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#33/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#33/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 fs_reclaim irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 pool_lock#2 irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &dentry->d_lock irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#33/1 &sb->s_type->i_mutex_key#11 &sb->s_type->i_lock_key#25 &dentry->d_lock irq_context: 0 &type->s_umount_key#33/1 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 irq_context: 0 
&type->i_mutex_dir_key#2 namespace_sem irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &____s->seqcount irq_context: 0 &type->s_umount_key/1 fs_reclaim irq_context: 0 &type->s_umount_key/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#34 irq_context: 0 &type->s_umount_key#34 sb_lock irq_context: 0 &type->s_umount_key#34 fs_reclaim irq_context: 0 &type->s_umount_key#34 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#34 pool_lock#2 irq_context: 0 &type->s_umount_key#34 &dentry->d_lock irq_context: 0 &type->s_umount_key#34 &lru->node[i].lock irq_context: 0 &type->s_umount_key#34 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#34 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 &type->s_umount_key#35 irq_context: 0 &type->s_umount_key#35 sb_lock irq_context: 0 &type->s_umount_key#35 &dentry->d_lock irq_context: 0 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 &type->s_umount_key#36/1 irq_context: 0 &type->s_umount_key#36/1 
fs_reclaim irq_context: 0 &type->s_umount_key#36/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#36/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#36/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#36/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#36/1 sb_lock irq_context: 0 &type->s_umount_key#36/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#36/1 pool_lock#2 irq_context: 0 &type->s_umount_key#36/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#26 irq_context: 0 &type->s_umount_key#36/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#36/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#36/1 &sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 &type->s_umount_key#36/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#36/1 &c->lock irq_context: 0 &type->s_umount_key#36/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &new_ns->poll irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 namespace_sem mount_lock mount_lock.seqcount &obj_hash[i].lock irq_context: 0 redirect_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock pool_lock#2 irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &mm->mmap_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &tty->write_wait irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock 
&pool->lock pool_lock#2 irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &port_lock_key &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &ldata->output_lock &rq->__lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &port_lock_key irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock hrtimer_bases.lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: hardirq &i->lock &port_lock_key irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &port_lock_key irq_context: hardirq &i->lock &port_lock_key &port->lock irq_context: hardirq &i->lock &port_lock_key &tty->write_wait irq_context: hardirq &i->lock &port_lock_key &tty->write_wait &p->pi_lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->termios_rwsem &port_lock_key &dev->power.lock irq_context: 0 &tty->ldisc_sem &tty->atomic_write_lock &tty->files_lock irq_context: 0 &tty->ldisc_sem &tty->write_wait irq_context: 0 &type->s_umount_key#37/1 irq_context: 0 &type->s_umount_key#37/1 fs_reclaim irq_context: 0 &type->s_umount_key#37/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#37/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#37/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#37/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#37/1 sb_lock irq_context: 0 &type->s_umount_key#37/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#37/1 pool_lock#2 irq_context: 0 &type->s_umount_key#37/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#27 irq_context: 0 &type->s_umount_key#37/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#37/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#37/1 &sb->s_type->i_lock_key#27 &dentry->d_lock irq_context: 0 &type->s_umount_key#37/1 fuse_mutex irq_context: 0 &type->s_umount_key#37/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#38/1 irq_context: 0 &type->s_umount_key#38/1 fs_reclaim irq_context: 0 &type->s_umount_key#38/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 batched_entropy_u8.lock irq_context: 0 &type->s_umount_key#38/1 kfence_freelist_lock irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#38/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#38/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#38/1 pool_lock#2 irq_context: 0 &type->s_umount_key#38/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#38/1 sb_lock irq_context: 0 &type->s_umount_key#38/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#38/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#28 irq_context: 0 &type->s_umount_key#38/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#38/1 
tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#38/1 &sb->s_type->i_lock_key#28 &dentry->d_lock irq_context: 0 &type->s_umount_key#38/1 pstore_sb_lock irq_context: 0 &type->s_umount_key#38/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#39/1 irq_context: 0 &type->s_umount_key#39/1 fs_reclaim irq_context: 0 &type->s_umount_key#39/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#39/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#39/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#39/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#39/1 sb_lock irq_context: 0 &type->s_umount_key#39/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#39/1 pool_lock#2 irq_context: 0 &type->s_umount_key#39/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39/1 &sb->s_type->i_lock_key#29 irq_context: 0 &type->s_umount_key#39/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#39/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#39/1 &sb->s_type->i_lock_key#29 &dentry->d_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock (kmod_concurrent_max).lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock fs_reclaim irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock pool_lock#2 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &x->wait#17 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &rq->__lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page) irq_context: hardirq &x->wait#12 &p->pi_lock irq_context: 0 reading_mutex &rq->__lock irq_context: hardirq &x->wait#12 &p->pi_lock &rq->__lock irq_context: 0 reading_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &x->wait#12 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&cb->timer) irq_context: softirq (&cb->timer) &obj_hash[i].lock irq_context: softirq (&cb->timer) &base->lock irq_context: softirq (&cb->timer) &base->lock &obj_hash[i].lock irq_context: 0 uts_sem irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock pgd_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock key irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock pcpu_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock percpu_counters_lock irq_context: 0 &type->s_umount_key#39/1 bpf_preload_lock running_helpers_waitq.lock irq_context: 0 &type->s_umount_key#39/1 &dentry->d_lock 
irq_context: 0 &type->s_umount_key#14 irq_context: 0 &type->s_umount_key#14 sb_lock irq_context: 0 &type->s_umount_key#14 fs_reclaim irq_context: 0 &type->s_umount_key#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#14 pool_lock#2 irq_context: 0 &type->s_umount_key#14 &dentry->d_lock irq_context: 0 &type->s_umount_key#14 &lru->node[i].lock irq_context: 0 &type->s_umount_key#14 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#14 &obj_hash[i].lock irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key irq_context: 0 &type->i_mutex_dir_key#5 irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq irq_context: 0 sb_writers#5 irq_context: 0 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key irq_context: 0 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 
sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sbinfo->stat_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &s->s_inode_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: softirq &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem quarantine_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &c->lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 uts_sem irq_context: 0 uts_sem hostname_poll.wait.lock irq_context: 0 rcu_read_lock rcu_read_lock mount_lock.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 
tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock &mm->mmap_lock irq_context: 0 &fs->lock &dentry->d_lock irq_context: 0 dup_mmap_sem irq_context: 0 dup_mmap_sem &mm->mmap_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->page_table_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock irq_context: 0 &p->alloc_lock &memcg->mm_list.lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock delayed_uprobe_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 
&____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &memcg->mm_list.lock irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 &mm->mmap_lock &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pgd_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock key irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pcpu_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xattrs->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim 
irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#8 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &sig->cred_guard_mutex &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 &p->alloc_lock &x->wait#25 irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock irq_context: 0 &p->alloc_lock &x->wait#25 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock &obj_hash[i].lock irq_context: 0 &sighand->siglock pool_lock#2 irq_context: 0 &mm->mmap_lock &meta->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 rcu_read_lock mount_lock.seqcount irq_context: 0 &u->iolock irq_context: 0 &u->iolock rlock-AF_UNIX irq_context: 0 &ei->socket.wq.wait irq_context: 0 key#5 irq_context: softirq (&cb->timer) tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex 
&iint->mutex mapping.invalidate_lock &xa->xa_lock#8 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &folio_wait_table[i] irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rcu_read_lock &p->alloc_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#3 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#9 &dentry->d_lock &lru->node[i].lock irq_context: 0 &bsd_socket_locks[i] irq_context: 0 sb_writers tk_core.seq.seqcount irq_context: 0 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &wb->list_lock irq_context: 0 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &u->lock irq_context: 0 &u->lock &u->lock/1 irq_context: 0 &group->mark_mutex irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex fs_reclaim irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &group->mark_mutex &____s->seqcount 
irq_context: 0 &group->mark_mutex pool_lock#2 irq_context: 0 &group->mark_mutex &c->lock irq_context: 0 &group->mark_mutex lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 &group->mark_mutex ucounts_lock irq_context: 0 &group->mark_mutex &mark->lock irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu irq_context: 0 &group->mark_mutex &mark->lock &fsnotify_mark_srcu &conn->lock irq_context: 0 &group->mark_mutex &mark->lock &conn->lock irq_context: 0 &group->mark_mutex &conn->lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &dentry->d_lock/1 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers &sb->s_type->i_lock_key#5 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#2 sb_writers &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 &fsnotify_mark_srcu &conn->lock irq_context: 0 &conn->lock irq_context: 0 &evdev->client_lock irq_context: 0 &evdev->mutex irq_context: 0 &evdev->mutex &dev->mutex#2 irq_context: 0 &evdev->mutex &mm->mmap_lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &evdev->mutex &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 slock-AF_NETLINK irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock irq_context: 0 cb_lock genl_mutex irq_context: 0 cb_lock genl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex &c->lock irq_context: 0 cb_lock genl_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rlock-AF_NETLINK irq_context: 0 cb_lock fs_reclaim irq_context: 0 cb_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 cb_lock pool_lock#2 irq_context: 0 cb_lock rlock-AF_NETLINK irq_context: 0 rlock-AF_NETLINK irq_context: 0 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex rhashtable_bucket rhashtable_bucket/1 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 genl_sk_destructing_waitq.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &nlk->wait irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_NETLINK irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) 
nfc_devlist_mutex &k->list_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &k->k_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &genl_data->genl_data_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->w) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->beacon_registrations_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rdev->mgmt_registrations_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &wdev->pmsr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem reg_indoor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem hwsim_radio_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 nl_table_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock irq_context: 0 sb_writers#6 irq_context: 0 sb_writers#6 mount_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 tk_core.seq.seqcount irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &sb->s_type->i_lock_key#8 irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock irq_context: 0 sb_writers#6 &sb->s_type->i_mutex_key#10 &wb->list_lock &sb->s_type->i_lock_key#8 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 &u->lock &sk->sk_peer_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 &u->lock rlock-AF_UNIX irq_context: 0 rcu_read_lock &ei->socket.wq.wait irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 &u->iolock &mm->mmap_lock irq_context: 0 &u->iolock &obj_hash[i].lock irq_context: 0 &u->iolock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 lock#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->notification_waitq irq_context: 0 &group->notification_lock irq_context: 0 &client->wait irq_context: softirq rcu_callback rlock-AF_NETLINK irq_context: softirq rcu_callback &dir->lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock quarantine_lock irq_context: 0 syslog_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &meta->lock irq_context: 0 &u->iolock kfence_freelist_lock irq_context: 0 &sb->s_type->i_lock_key#14 irq_context: 0 &sb->s_type->i_lock_key#14 &dentry->d_lock irq_context: 0 &pipe->mutex/1 irq_context: 0 &pipe->rd_wait 
irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &sig->cred_guard_mutex &stopper->lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sig->cred_guard_mutex &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &sig->cred_guard_mutex &x->wait#8 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq &dev->power.lock hrtimer_bases.lock irq_context: hardirq &dev->power.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 pool_lock#2 irq_context: 0 &u->lock clock-AF_UNIX irq_context: 0 &u->peer_wait irq_context: 0 rlock-AF_UNIX irq_context: 0 &iint->mutex &lock->wait_lock irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &lock->wait_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &rq->__lock irq_context: 0 &pipe->mutex/1 &lock->wait_lock irq_context: 0 &pipe->mutex/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->wr_wait irq_context: 0 &pipe->mutex/1 fs_reclaim irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &____s->seqcount irq_context: 0 &pipe->mutex/1 pool_lock#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock irq_context: 0 &pipe->rd_wait &p->pi_lock irq_context: 0 &pipe->rd_wait 
&p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 tk_core.seq.seqcount irq_context: 0 sb_writers#7 mount_lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &rq->__lock irq_context: 0 &u->lock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 sb_writers#3 &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss irq_context: 0 sb_writers#3 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 tomoyo_ss &c->lock irq_context: 0 sb_writers#3 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#3 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#3 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#3 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tk_core.seq.seqcount irq_context: 0 sb_writers#3 &mm->mmap_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 batched_entropy_u8.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 
rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &meta->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock kfence_freelist_lock irq_context: 0 &iint->mutex sb_writers#4 &rq->__lock irq_context: 0 &iint->mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 &iint->mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &iint->mutex sb_writers#4 &meta->lock irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: softirq mm/memcontrol.c:589 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem ptlock_ptr(page) irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX irq_context: 0 sk_lock-AF_UNIX slock-AF_UNIX irq_context: 0 slock-AF_UNIX irq_context: hardirq log_wait.lock &p->pi_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 sb_writers#4 &____s->seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->s_umount_key#32 sb_writers#4 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#32 sb_writers#4 lock#4 irq_context: 0 &type->s_umount_key#32 sb_writers#4 &mapping->private_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 &dd->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 &c->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock &dd->lock irq_context: 0 
&type->s_umount_key#32 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 bit_wait_table + i irq_context: 0 &type->s_umount_key#32 sb_writers#4 &rq->__lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 &eli->li_list_mtx irq_context: 0 &type->s_umount_key#32 sb_writers#4 &journal->j_state_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &dd->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &x->wait#26 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_node_0 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->wait#26 irq_context: softirq &x->wait#26 &p->pi_lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &x->wait#26 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem (&timer.timer) irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &meta_group_info[i]->alloc_sem &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 irq_context: 0 &type->s_umount_key#32 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->s_umount_key#32 sb_writers#4 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_revoke_lock irq_context: 0 &ei->xattr_sem irq_context: 0 &iint->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 
&xattrs->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &n->list_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &s->s_inode_list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#3 oom_adj_mutex irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &p->alloc_lock irq_context: 0 sb_writers#3 oom_adj_mutex &p->alloc_lock irq_context: 0 low_water_lock console_owner_lock irq_context: 0 low_water_lock console_owner irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key#22 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 &sk->sk_peer_lock irq_context: 0 &ep->mtx irq_context: 0 epnested_mutex irq_context: 0 epnested_mutex &ep->mtx irq_context: 0 epnested_mutex &ep->mtx fs_reclaim irq_context: 0 epnested_mutex &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx &____s->seqcount irq_context: 0 epnested_mutex &ep->mtx pool_lock#2 irq_context: 0 epnested_mutex &ep->mtx &c->lock irq_context: 0 epnested_mutex &ep->mtx &f->f_lock irq_context: 0 epnested_mutex &ep->mtx &ei->socket.wq.wait irq_context: 0 epnested_mutex &ep->mtx &ep->lock irq_context: 0 epnested_mutex rcu_read_lock &f->f_lock irq_context: 0 &ep->mtx fs_reclaim irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &f->f_lock irq_context: 0 &ep->mtx pool_lock#2 irq_context: 0 &ep->mtx &group->notification_waitq irq_context: 0 &ep->mtx &group->notification_lock irq_context: 0 &ep->mtx &ep->lock irq_context: 0 &ep->mtx &sighand->signalfd_wqh irq_context: 0 &ep->mtx &sighand->siglock irq_context: 0 &ep->mtx &ei->socket.wq.wait irq_context: 0 &ep->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &dentry->d_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock irq_context: 
hardirq log_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: hardirq log_wait.lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 remove_cache_srcu irq_context: 0 remove_cache_srcu quarantine_lock irq_context: 0 remove_cache_srcu &c->lock irq_context: 0 remove_cache_srcu &n->list_lock irq_context: 0 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 remove_cache_srcu &obj_hash[i].lock irq_context: 0 tomoyo_ss &n->list_lock irq_context: 0 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 swap_lock irq_context: 0 sb_writers#8 irq_context: 0 sb_writers#8 mount_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 
rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq#2 irq_context: 0 kn->active fs_reclaim irq_context: 0 kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active pool_lock#2 irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#13 irq_context: 0 sb_writers#8 fs_reclaim irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 pool_lock#2 irq_context: 0 sb_writers#8 &mm->mmap_lock irq_context: 0 sb_writers#8 &of->mutex irq_context: 0 sb_writers#8 &of->mutex kn->active &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem irq_context: 0 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active &obj_hash[i].lock irq_context: 0 sb_writers#8 &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock irq_context: 0 kn->active#2 fs_reclaim irq_context: 0 kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#2 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#2 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 mount_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 
tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 kn->active#2 &c->lock irq_context: 0 kn->active#2 &____s->seqcount irq_context: 0 kn->active#2 pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &c->lock irq_context: 0 kn->active &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &c->lock irq_context: 0 kn->active &____s->seqcount irq_context: 0 kn->active#2 &n->list_lock irq_context: 0 kn->active#2 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock pool_lock irq_context: 0 tomoyo_ss remove_cache_srcu irq_context: 0 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &n->list_lock irq_context: 0 sb_writers#8 &c->lock irq_context: 0 sb_writers#8 &n->list_lock irq_context: 0 sb_writers#8 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#2 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 quarantine_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 
remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &ep->mtx &____s->seqcount irq_context: 0 &ep->mtx &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &c->lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 sb_writers#3 tomoyo_ss quarantine_lock irq_context: 0 kn->active#2 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override pool_lock irq_context: 0 kn->active &n->list_lock irq_context: 0 kn->active &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active remove_cache_srcu irq_context: 0 kn->active remove_cache_srcu quarantine_lock irq_context: 0 kn->active remove_cache_srcu &c->lock irq_context: 0 kn->active remove_cache_srcu &n->list_lock irq_context: 0 kn->active remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active remove_cache_srcu &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &eli->li_list_mtx &obj_hash[i].lock irq_context: 0 &eli->li_list_mtx pool_lock#2 irq_context: 0 ext4_li_mtx irq_context: 0 ext4_li_mtx &eli->li_list_mtx irq_context: 0 ext4_li_mtx &obj_hash[i].lock irq_context: 0 ext4_li_mtx pool_lock#2 irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sb_writers#8 remove_cache_srcu irq_context: 0 sb_writers#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &root->kernfs_rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &n->list_lock &c->lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pool_lock#2 irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#2 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#2 remove_cache_srcu irq_context: 0 kn->active#2 remove_cache_srcu quarantine_lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: hardirq &dev->power.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)pm (work_completion)(&dev->power.work) &dev->power.lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex 
kn->active remove_cache_srcu &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 kn->active#2 remove_cache_srcu &c->lock irq_context: 0 kn->active#2 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#2 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 kn->active#2 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#2 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &obj_hash[i].lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 batched_entropy_u8.lock irq_context: 0 sb_writers#8 kfence_freelist_lock irq_context: 0 sb_writers#8 &meta->lock irq_context: 0 remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) krc.lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &obj_hash[i].lock pool_lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 kn->active#2 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#2 uevent_sock_mutex kfence_freelist_lock irq_context: 0 kn->active#2 remove_cache_srcu &rq->__lock irq_context: 0 tomoyo_ss rcu_node_0 irq_context: 0 kn->active#2 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#3 fs_reclaim irq_context: 0 kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 
&of->mutex kn->active#3 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &n->list_lock irq_context: 0 kn->active#3 &____s->seqcount irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 kn->active#3 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#3 &n->list_lock irq_context: 0 kn->active#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 uevent_sock_mutex rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &type->i_mutex_dir_key#5 &dentry->d_lock/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 &sem->wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 quarantine_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim &rq->__lock irq_context: 0 kn->active#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 tomoyo_ss batched_entropy_u8.lock irq_context: 0 tomoyo_ss kfence_freelist_lock irq_context: 0 tomoyo_ss &meta->lock irq_context: 0 &ep->mtx &rq->__lock irq_context: 0 kn->active#3 remove_cache_srcu irq_context: 0 kn->active#3 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#3 remove_cache_srcu &c->lock irq_context: 0 kn->active#3 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss 
&n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_lock_key#24 irq_context: 0 sb_writers#8 &wb->list_lock irq_context: 0 sb_writers#8 &wb->list_lock &sb->s_type->i_lock_key#24 irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#5 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#5 kfence_freelist_lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 
&rq->__lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim &rq->__lock irq_context: 0 kn->active#4 fs_reclaim irq_context: 0 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex irq_context: 0 &p->lock &of->mutex kn->active#4 param_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rename_lock.seqcount irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &dentry->d_lock irq_context: 0 sb_writers#8 tomoyo_ss irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 tomoyo_ss &c->lock irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#4 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 irq_context: 0 sb_writers#8 iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &____s->seqcount irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &c->lock irq_context: 0 &root->kernfs_iattr_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &sem->wait_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock irq_context: 0 
sb_writers#8 &sb->s_type->i_mutex_key#13 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#4 param_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#4 param_lock disk_events_mutex irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &vma->vm_lock->lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sem->wait_lock irq_context: 0 sb_writers#5 &p->pi_lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 kn->active#5 fs_reclaim irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock 
&ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &c->lock irq_context: 0 kn->active#5 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount irq_context: 0 kn->active#5 batched_entropy_u8.lock irq_context: 0 kn->active#5 kfence_freelist_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 kn->active#5 &n->list_lock irq_context: 0 kn->active#5 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 udc_lock irq_context: 0 kn->active#5 remove_cache_srcu irq_context: 0 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fw_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 quarantine_lock irq_context: 0 sb_writers#8 &of->mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu &pcp->lock 
&zone->lock irq_context: 0 kn->active#5 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#5 &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &n->list_lock &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 &____s->seqcount irq_context: 0 &p->lock &n->list_lock irq_context: 0 &p->lock &n->list_lock &c->lock irq_context: 0 kn->active#6 fs_reclaim irq_context: 0 kn->active#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#6 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#7 fs_reclaim irq_context: 0 kn->active#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#7 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 fs_reclaim irq_context: 0 kn->active#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#8 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#9 fs_reclaim irq_context: 0 kn->active#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#9 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#10 fs_reclaim irq_context: 0 kn->active#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#10 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#11 fs_reclaim irq_context: 0 kn->active#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#11 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#11 
&kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#12 fs_reclaim irq_context: 0 kn->active#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#12 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount irq_context: 0 &p->lock remove_cache_srcu irq_context: 0 &p->lock remove_cache_srcu quarantine_lock irq_context: 0 &p->lock remove_cache_srcu &c->lock irq_context: 0 &p->lock remove_cache_srcu &n->list_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_node_0 irq_context: 0 &p->lock &of->mutex kn->active#5 quarantine_lock irq_context: 0 kn->active#13 fs_reclaim irq_context: 0 kn->active#13 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#13 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#14 fs_reclaim irq_context: 0 kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#14 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#14 fs_reclaim irq_context: 0 &p->lock &of->mutex kn->active#14 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#14 pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#14 &obj_hash[i].lock irq_context: 0 kn->active#14 &c->lock irq_context: 0 kn->active#14 &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#13 &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &dentry->d_lock/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &obj_hash[i].lock pool_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#10 &c->lock irq_context: 0 &rnp->exp_lock irq_context: 0 rcu_state.exp_mutex irq_context: 0 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 rcu_state.exp_mutex.wait_lock irq_context: 0 kn->active#11 &c->lock irq_context: 0 kn->active#11 &n->list_lock irq_context: 0 kn->active#11 
&n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &pcp->lock &zone->lock irq_context: 0 &p->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sem->wait_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rfkill->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &device->physical_node_lock irq_context: 0 kn->active#15 fs_reclaim irq_context: 0 kn->active#15 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#15 &c->lock irq_context: 0 kn->active#15 &n->list_lock irq_context: 0 kn->active#15 &n->list_lock &c->lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#15 dev_base_lock irq_context: 0 kn->active#16 fs_reclaim irq_context: 0 kn->active#16 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#16 dev_base_lock irq_context: 0 kn->active#17 fs_reclaim irq_context: 0 kn->active#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
kn->active#17 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#18 fs_reclaim irq_context: 0 kn->active#18 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#18 dev_base_lock irq_context: 0 kn->active#19 fs_reclaim irq_context: 0 kn->active#19 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#19 &c->lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#19 dev_base_lock irq_context: 0 kn->active#20 fs_reclaim irq_context: 0 kn->active#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#20 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#20 dev_base_lock irq_context: 0 kn->active#21 fs_reclaim irq_context: 0 kn->active#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#21 pool_lock#2 irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#21 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &of->mutex irq_context: 0 &of->mutex kn->active#21 &dev->power.lock irq_context: 0 &of->mutex kn->active#21 pci_lock irq_context: 0 &of->mutex kn->active#21 pci_lock pci_config_lock irq_context: 0 kn->active#22 fs_reclaim irq_context: 0 kn->active#22 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#22 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#23 fs_reclaim irq_context: 0 kn->active#23 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#23 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#24 fs_reclaim irq_context: 0 kn->active#24 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#24 &c->lock irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#24 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
kn->active#25 fs_reclaim irq_context: 0 kn->active#25 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#25 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 fs_reclaim irq_context: 0 kn->active#26 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#26 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &ep->mtx &pipe->rd_wait irq_context: 0 kn->active#27 fs_reclaim irq_context: 0 kn->active#27 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#27 remove_cache_srcu irq_context: 0 kn->active#27 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#27 remove_cache_srcu &c->lock irq_context: 0 kn->active#27 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#27 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#2 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &n->list_lock &c->lock irq_context: 0 kn->active#28 fs_reclaim irq_context: 0 kn->active#28 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#29 fs_reclaim irq_context: 0 kn->active#29 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#29 &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 fs_reclaim irq_context: 0 kn->active#30 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#31 fs_reclaim irq_context: 0 kn->active#31 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 fs_reclaim irq_context: 0 
kn->active#32 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#32 &c->lock irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 fs_reclaim irq_context: 0 kn->active#33 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#30 &c->lock irq_context: 0 kn->active#31 &c->lock irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#32 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#2 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#2 kfence_freelist_lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#28 &c->lock irq_context: 0 kn->active#28 &n->list_lock irq_context: 0 kn->active#28 &n->list_lock &c->lock irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss quarantine_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &u->iolock quarantine_lock irq_context: 0 sb_writers#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#27 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#27 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 udc_lock irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#33 &c->lock irq_context: 0 kn->active#33 &n->list_lock irq_context: 0 kn->active#33 &n->list_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback quarantine_lock irq_context: 0 kn->active#6 &c->lock irq_context: 0 kn->active#9 &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#14 &c->lock irq_context: 0 kn->active#12 &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 &mousedev->client_lock irq_context: 0 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rnp->exp_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rnp->exp_wq[3] irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#7 &c->lock irq_context: 0 &rnp->exp_wq[0] irq_context: 0 &evdev->mutex &dev->mutex#2 &rnp->exp_lock irq_context: 0 &evdev->mutex &dev->mutex#2 &rnp->exp_wq[0] irq_context: 0 &evdev->mutex &dev->mutex#2 &rq->__lock irq_context: 0 &evdev->mutex &dev->mutex#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hctx->lock irq_context: 0 rcu_read_lock &hctx->lock irq_context: 0 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &p->lock &of->mutex kn->active#5 fw_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 
rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pipe->rd_wait &ep->lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rcu_read_lock &pipe->rd_wait irq_context: 0 &ep->mtx &obj_hash[i].lock irq_context: 0 &sighand->signalfd_wqh irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#14 irq_context: 0 mapping.invalidate_lock#2 irq_context: 0 mapping.invalidate_lock#2 mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock#2 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#8 irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#8 pool_lock#2 irq_context: 0 mapping.invalidate_lock#2 lock#4 irq_context: 0 mapping.invalidate_lock#2 tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &dd->lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock rcu_node_0 irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &rq->__lock irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#5 &c->lock irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &n->list_lock &c->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] irq_context: 0 sb_writers#8 kn->active#5 
&kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &sb->s_type->i_lock_key#24 &dentry->d_lock &wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu irq_context: 0 &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#2 &n->list_lock &c->lock irq_context: 0 sb_writers#8 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#8 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &rq->__lock irq_context: 0 mapping.invalidate_lock#2 lock#4 &lruvec->lru_lock irq_context: 0 kn->active#27 &c->lock irq_context: 0 kn->active#5 &rq->__lock irq_context: 0 mapping.invalidate_lock#2 &xa->xa_lock#8 &c->lock irq_context: 0 kn->active#27 &n->list_lock irq_context: 0 kn->active#27 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock 
irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#30 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock pool_lock#2 irq_context: 0 kn->active#33 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#29 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &mm->mmap_lock &lruvec->lru_lock irq_context: 0 &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 kn->active#28 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#2 &____s->seqcount irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#31 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#32 &n->list_lock irq_context: 0 kn->active#32 &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 tk_core.seq.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock 
&dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#8 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &lruvec->lru_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &xa->xa_lock#8 irq_context: 0 kn->active#34 fs_reclaim irq_context: 0 kn->active#34 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#34 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &c->lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#28 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 &rq->__lock irq_context: 0 mapping.invalidate_lock#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#5 
pool_lock#2 irq_context: 0 kn->active#27 &____s->seqcount irq_context: 0 kn->active#29 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 &c->lock irq_context: 0 kn->active#27 pool_lock#2 irq_context: 0 kn->active#32 &____s->seqcount irq_context: 0 kn->active#33 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#2 &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sem->wait_lock irq_context: 0 sb_writers &p->pi_lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock irq_context: 0 sb_writers &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sem->wait_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#30 &____s->seqcount irq_context: 0 &p->lock &of->mutex kn->active#31 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 kn->active#31 &____s->seqcount irq_context: 0 mapping.invalidate_lock#2 rcu_read_lock &c->lock irq_context: 0 kn->active#27 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#35 fs_reclaim irq_context: 0 kn->active#35 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#35 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock irq_context: 0 kn->active#36 fs_reclaim irq_context: 0 kn->active#36 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#36 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#37 fs_reclaim irq_context: 0 kn->active#37 fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 kn->active#37 &c->lock irq_context: 0 kn->active#37 &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 kn->active#38 fs_reclaim irq_context: 0 kn->active#38 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#38 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &of->mutex kn->active#38 i2c_dev_list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex key#5 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock irq_context: 0 
&sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &lruvec->lru_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &sem->wait_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &n->list_lock irq_context: 0 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 videodev_lock irq_context: 0 &dev_instance->mutex irq_context: 0 &dev_instance->mutex fs_reclaim irq_context: 0 &dev_instance->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev_instance->mutex &c->lock irq_context: 0 &dev_instance->mutex pool_lock#2 irq_context: 0 &dev_instance->mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &dev_instance->mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &q->done_wq irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev_instance->mutex pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex &mdev->graph_mutex irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vicodec_core:1844:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem pool_lock#2 irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#39 fs_reclaim irq_context: 0 kn->active#39 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#39 
&kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fh->state->lock irq_context: 0 &vdev->fh_lock irq_context: 0 &dev->dev_mutex irq_context: 0 &dev->dev_mutex &rq->__lock irq_context: 0 &dev->dev_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->dev_mutex fs_reclaim irq_context: 0 &dev->dev_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &dev->dev_mutex pool_lock#2 irq_context: 0 &dev->dev_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &dev->dev_mutex &c->lock irq_context: 0 &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &dev->dev_mutex &vdev->fh_lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex vim2m:1183:(hdl)->_lock pool_lock#2 irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &m2m_dev->job_spinlock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->done_wq irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &q->mmap_lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex &obj_hash[i].lock irq_context: 0 &mdev->req_queue_mutex &dev->dev_mutex pool_lock#2 irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &p->lock &mm->mmap_lock fs_reclaim irq_context: 0 &p->lock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock &mm->mmap_lock &____s->seqcount irq_context: 0 &p->lock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &p->lock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &pipe->mutex/1 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &cfs_rq->removed.lock irq_context: 0 &p->lock &rq->__lock irq_context: 0 &u->bindlock irq_context: 0 &u->bindlock fs_reclaim irq_context: 0 &u->bindlock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->bindlock pool_lock#2 irq_context: 0 &u->bindlock batched_entropy_u32.lock 
irq_context: 0 &u->bindlock &net->unx.table.locks[i] irq_context: 0 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock irq_context: 0 &u->lock &u->lock/1 &dentry->d_lock irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock &sk->sk_peer_lock/1 irq_context: 0 &u->lock &u->lock/1 &sk->sk_peer_lock/1 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss quarantine_lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &dentry->d_lock irq_context: 0 &group->mark_mutex &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &vcapture->lock irq_context: 0 &mdev->graph_mutex irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss quarantine_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim &obj_hash[i].lock irq_context: softirq &(&wb->dwork)->timer irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &p->sequence irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 batched_entropy_u32.lock crngs.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&journal->j_commit_timer) irq_context: softirq (&journal->j_commit_timer) &p->pi_lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex irq_context: 0 &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 &journal->j_checkpoint_mutex &dd->lock irq_context: 0 &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock irq_context: 0 &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_updates irq_context: 0 &journal->j_list_lock irq_context: 0 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &ei->i_es_lock irq_context: 0 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 lock#4 irq_context: 0 &mapping->private_lock irq_context: 0 &ret->b_state_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock irq_context: 0 &ei->i_es_lock key#2 irq_context: 0 &dd->lock irq_context: 0 &journal->j_state_lock irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 rcu_read_lock &dd->lock irq_context: 0 
(wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) &dd->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &dd->lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &memcg->move_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#8 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 &journal->j_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_md_lock irq_context: 0 &journal->j_fc_wait irq_context: 0 &journal->j_history_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 irq_context: 0 &mdev->req_queue_mutex &dev->mutex#3 &vdev->fh_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem kfence_freelist_lock irq_context: 0 kn->active#39 &c->lock irq_context: 0 &pipe->mutex/1 &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock irq_context: 0 kn->active#39 &n->list_lock irq_context: 0 kn->active#39 &n->list_lock &c->lock irq_context: 0 kn->active#37 &n->list_lock irq_context: 0 kn->active#37 &n->list_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &n->list_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock 
&anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crngs.lock base_crng.lock irq_context: 0 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &mm->mmap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 &pipe->rd_wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#37 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 &ep->mtx key#11 irq_context: 0 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock 
remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &base->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock quarantine_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_node_0 irq_context: 0 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock quarantine_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 sb_writers 
&type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &sb->s_type->i_mutex_key#4/4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 krc.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock &dentry->d_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4/4 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#37 remove_cache_srcu irq_context: 0 kn->active#37 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem pool_lock#2 irq_context: hardirq &x->wait#5 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &pipe->mutex/1 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &p->lock &of->mutex &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss 
rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &sem->wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &base->lock irq_context: 0 tomoyo_ss &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 kn->active#39 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock pool_lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 tasklist_lock &sighand->siglock kfence_freelist_lock irq_context: 0 &sighand->siglock &meta->lock irq_context: 0 &sighand->siglock kfence_freelist_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq drivers/base/dd.c:321 irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/base/dd.c:321 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/base/dd.c:321 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->list_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work device_links_lock &k->k_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_mutex irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work deferred_probe_work irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &x->wait#10 irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &rq->__lock irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (deferred_probe_timeout_work).work &obj_hash[i].lock irq_context: 0 sb_writers#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &obj_hash[i].lock irq_context: 0 sb_writers#5 pool_lock#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem pool_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &base->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &base->lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &sb->s_type->i_lock_key#3 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#32 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &wb->list_lock &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &wb->list_lock &sb->s_type->i_lock_key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock &memcg->move_lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 irq_context: softirq rcu_read_lock &xa->xa_lock#8 &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 key#10 irq_context: softirq rcu_read_lock &xa->xa_lock#8 key#12 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &xa->xa_lock#8 &wb->work_lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 &wb->work_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 &wb->work_lock &base->lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: softirq &(&wb->bw_dwork)->timer irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->bw_dwork)->work) &wb->list_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 &cfs_rq->removed.lock irq_context: 0 &p->lock rcu_node_0 irq_context: 0 &p->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock rcu_read_lock rcu_node_0 irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 sb_writers#5 &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#5 
&kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &lock->wait_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &lock->wait_lock irq_context: 0 kn->active#5 &p->pi_lock irq_context: 0 kn->active#5 &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &base->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock pool_lock#2 irq_context: 0 &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_node_0 irq_context: 0 kn->active#40 fs_reclaim irq_context: 0 kn->active#40 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#40 &c->lock irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#40 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 kn->active#41 fs_reclaim irq_context: 0 kn->active#41 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#41 &c->lock irq_context: 0 kn->active#41 &n->list_lock irq_context: 0 kn->active#41 &n->list_lock &c->lock irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#41 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 fs_reclaim irq_context: 0 kn->active#42 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#42 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#42 
&kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &p->lock &of->mutex kn->active#5 batched_entropy_u8.lock irq_context: 0 &p->lock &of->mutex kn->active#5 kfence_freelist_lock irq_context: 0 &p->lock &of->mutex kn->active#5 &meta->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 &lo->lo_mutex irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &disk->open_mutex &lo->lo_mutex irq_context: 0 &disk->open_mutex nbd_index_mutex irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock mmu_notifier_invalidate_range_start irq_context: 0 &disk->open_mutex nbd_index_mutex &nbd->config_lock pool_lock#2 irq_context: 0 kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 tomoyo_ss &meta->lock irq_context: 0 &disk->open_mutex &nbd->config_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &bdev->bd_size_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &q->queue_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &disk->open_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex &nbd->config_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &disk->open_mutex &nbd->config_lock &x->wait#3 irq_context: 0 
&disk->open_mutex &nbd->config_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 &disk->open_mutex &nbd->config_lock set->srcu irq_context: 0 &disk->open_mutex &nbd->config_lock pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 &u->iolock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mousedev->mutex/1 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 &mousedev->mutex/1 &mousedev->mutex#2 &dev->mutex#2 &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 &rfkill->lock irq_context: 0 kn->active#16 &c->lock irq_context: 0 kn->active#16 &n->list_lock irq_context: 0 kn->active#16 &n->list_lock &c->lock irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#18 &c->lock irq_context: 0 kn->active#18 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu quarantine_lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &n->list_lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &obj_hash[i].lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &c->lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu &rq->__lock irq_context: 0 &p->lock remove_cache_srcu &rq->__lock irq_context: 0 kn->active#19 
&kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#19 &lock->wait_lock irq_context: 0 kn->active#19 &p->pi_lock irq_context: 0 kn->active#19 &p->pi_lock &rq->__lock irq_context: 0 kn->active#19 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#43 fs_reclaim irq_context: 0 kn->active#43 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#43 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#18 &n->list_lock irq_context: 0 kn->active#18 &n->list_lock &c->lock irq_context: 0 &u->iolock &base->lock irq_context: 0 &u->iolock &base->lock &obj_hash[i].lock irq_context: 0 &disk->open_mutex &new->lock irq_context: 0 &disk->open_mutex &new->lock &mtdblk->cache_mutex irq_context: 0 kn->active#17 &c->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 kn->active#15 &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#44 fs_reclaim irq_context: 0 kn->active#44 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#44 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 &mtd->master.chrdev_lock irq_context: 0 &mtd->master.chrdev_lock &mm->mmap_lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 tasklist_lock 
&sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#2 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &p->lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &p->lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#2 &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 kn->active#45 fs_reclaim irq_context: 0 kn->active#45 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#45 &c->lock irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#45 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key/1 batched_entropy_u8.lock irq_context: 0 sb_writers &type->i_mutex_dir_key/1 kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&dom->period_timer) irq_context: softirq (&dom->period_timer) key#13 irq_context: softirq (&dom->period_timer) &p->sequence irq_context: softirq (&dom->period_timer) &obj_hash[i].lock irq_context: softirq (&dom->period_timer) &base->lock irq_context: softirq (&dom->period_timer) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 
&fsnotify_mark_srcu pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &group->notification_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &mark->lock irq_context: 0 &group->inotify_data.idr_lock irq_context: 0 &group->inotify_data.idr_lock &obj_hash[i].lock irq_context: 0 &group->inotify_data.idr_lock pool_lock#2 irq_context: 0 destroy_lock irq_context: 0 fs/notify/mark.c:89 irq_context: 0 (wq_completion)events_unbound connector_reaper_work irq_context: 0 (wq_completion)events_unbound connector_reaper_work destroy_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &fsnotify_mark_srcu irq_context: 0 (reaper_work).work irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 sb_writers#5 &s->s_inode_list_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &info->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#5 &sbinfo->stat_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#5 &xa->xa_lock#8 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &x->wait#3 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work destroy_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 (wq_completion)events_unbound (reaper_work).work 
&ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &x->wait#3 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &____s->seqcount irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &rq->__lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &meta->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &n->list_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &n->list_lock &c->lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: softirq &(&tbl->gc_work)->timer irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&tbl->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex rcu_read_lock pgd_lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) irq_context: 0 &iint->mutex 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock irq_context: 0 &iint->mutex rcu_read_lock key irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock irq_context: 0 &iint->mutex rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex rcu_read_lock percpu_counters_lock irq_context: 0 &iint->mutex mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &c->lock irq_context: 0 &iint->mutex &n->list_lock irq_context: 0 &iint->mutex &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx remove_cache_srcu irq_context: 0 &ep->mtx remove_cache_srcu quarantine_lock irq_context: 0 &ep->mtx remove_cache_srcu &n->list_lock irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 &ep->mtx remove_cache_srcu &c->lock irq_context: 0 userns_state_mutex irq_context: 0 &ei->xattr_sem &mapping->private_lock irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &xa->xa_lock#8 &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex batched_entropy_u8.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex kfence_freelist_lock irq_context: 0 sb_writers#4 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &c->lock batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock &c->lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#3 pgd_lock irq_context: 0 &type->i_mutex_dir_key#3 key irq_context: 0 &type->i_mutex_dir_key#3 pcpu_lock irq_context: 0 &type->i_mutex_dir_key#3 percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#4 &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &n->list_lock irq_context: 0 rtnl_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem irq_context: 0 rtnl_mutex 
(inetaddr_chain).rwsem irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex _xmit_LOOPBACK irq_context: 0 rtnl_mutex netpoll_srcu irq_context: 0 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex &im->lock irq_context: 0 rtnl_mutex fib_info_lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex cbs_list_lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &idev->mc_lock irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex &ifa->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &tb->tb6_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &ndev->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET irq_context: softirq rcu_callback &dir->lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss tomoyo_policy_lock &c->lock 
irq_context: 0 &sig->cred_guard_mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: softirq &(&ipvs->defense_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_INET6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dd->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 
irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#5 tomoyo_ss irq_context: 0 sb_writers#5 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#5 &xattrs->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &mapping->i_mmap_rwsem irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &sb->s_type->i_lock_key &xa->xa_lock#8 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 lock#5 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &lruvec->lru_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#5 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#5 &sb->s_type->i_mutex_key#12 &info->lock irq_context: 0 &sb->s_type->i_lock_key#4 irq_context: 0 &sb->s_type->i_lock_key#4 &dentry->d_lock irq_context: 0 sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET slock-AF_INET irq_context: 0 slock-AF_INET irq_context: 0 sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 slock-AF_INET6 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_es_lock key#6 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss tomoyo_policy_lock kfence_freelist_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET &table->hash[i].lock 
irq_context: 0 sk_lock-AF_INET &table->hash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_NETLINK &mm->mmap_lock irq_context: 0 sk_lock-AF_NETLINK fs_reclaim irq_context: 0 sk_lock-AF_NETLINK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NETLINK pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK free_vmap_area_lock pool_lock#2 irq_context: 0 sk_lock-AF_NETLINK vmap_area_lock irq_context: 0 sk_lock-AF_NETLINK &____s->seqcount irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex irq_context: 0 sk_lock-AF_NETLINK pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_NETLINK &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK pack_mutex irq_context: 0 sk_lock-AF_NETLINK batched_entropy_u32.lock irq_context: 0 sk_lock-AF_NETLINK text_mutex irq_context: 0 sk_lock-AF_NETLINK text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_NETLINK &fp->aux->used_maps_mutex irq_context: 0 rcu_read_lock &sb->s_type->i_lock_key#22 irq_context: 0 kn->active#46 fs_reclaim irq_context: 0 kn->active#46 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#46 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock cpufreq_driver_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &ndev->lock &ifa->lock irq_context: 0 &type->i_mutex_dir_key#4 &dentry->d_lock &wq#2 irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex irq_context: 0 cb_lock rtnl_mutex irq_context: 0 cb_lock &obj_hash[i].lock irq_context: 0 cb_lock &c->lock irq_context: 0 cb_lock &____s->seqcount irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dev_addr_sem irq_context: 0 cb_lock genl_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &n->list_lock irq_context: 0 cb_lock &n->list_lock &c->lock irq_context: 0 sb_writers#3 &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim irq_context: 
0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 cb_lock quarantine_lock irq_context: 0 cb_lock remove_cache_srcu irq_context: 0 cb_lock remove_cache_srcu quarantine_lock irq_context: 0 cb_lock remove_cache_srcu &c->lock irq_context: 0 cb_lock remove_cache_srcu &n->list_lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rlock-AF_NETLINK irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &obj_hash[i].lock irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->alloc_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock remove_cache_srcu irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock remove_cache_srcu quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock remove_cache_srcu &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock remove_cache_srcu &n->list_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &c->lock irq_context: 0 &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 quarantine_lock irq_context: 0 sb_writers#5 fs_reclaim irq_context: 0 sb_writers#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 sb_writers#5 mount_lock irq_context: 0 sb_writers#5 sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#5 sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#5 sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#5 &sb->s_type->i_lock_key &xa->xa_lock#8 irq_context: 0 sb_writers#5 lock#4 irq_context: 0 sb_writers#5 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#5 lock#5 irq_context: 0 sb_writers#5 &lruvec->lru_lock irq_context: 0 sb_writers#5 rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &iint->mutex &base->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex &base->lock &obj_hash[i].lock irq_context: softirq (&net->sctp.addr_wq_timer) irq_context: softirq (&net->sctp.addr_wq_timer) 
&net->sctp.addr_wq_lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: softirq (&net->sctp.addr_wq_timer) &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 tomoyo_ss &rq->__lock irq_context: 0 &sig->cred_guard_mutex &p->alloc_lock &x->wait#25 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &dev_addr_list_lock_key pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &base->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &base->lock &obj_hash[i].lock irq_context: 0 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 rtnl_mutex &pnettable->lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex irq_context: 0 rtnl_mutex napi_hash_lock irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex x25_neigh_list_lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &u->lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex _xmit_ETHER irq_context: 0 rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 rtnl_mutex _xmit_SLIP irq_context: hardirq log_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&eql->timer) irq_context: softirq (&eql->timer) &eql->queue.lock irq_context: softirq (&eql->timer) &obj_hash[i].lock irq_context: softirq (&eql->timer) &base->lock irq_context: softirq (&eql->timer) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &sem->wait_lock irq_context: 0 
rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#5 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex &vi->refill_lock irq_context: softirq _xmit_ETHER#2 irq_context: 0 rtnl_mutex noop_qdisc.q.lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu irq_context: 0 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &rfkill->lock irq_context: 0 rtnl_mutex &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &data->mutex irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy0 irq_context: 0 (wq_completion)phy0 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy0 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock noop_qdisc.q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &sch->q.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex class irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex cbs_list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy1 irq_context: 0 (wq_completion)phy1 
(work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy1 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_VOID irq_context: 0 &u->iolock &u->lock irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim irq_context: 0 &u->iolock &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock &____s->seqcount irq_context: 0 &u->iolock &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &u->iolock rcu_read_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &c->lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock irq_context: 0 &sig->cred_guard_mutex &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock &c->lock irq_context: 0 rtnl_mutex _xmit_X25 irq_context: 0 rtnl_mutex lapb_list_lock irq_context: 0 rtnl_mutex lapb_list_lock pool_lock#2 irq_context: 0 rtnl_mutex lapb_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock irq_context: 0 rtnl_mutex lapb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapbeth->up_lock irq_context: 0 rtnl_mutex &lapb->lock irq_context: 0 rtnl_mutex &lapb->lock &c->lock irq_context: 0 rtnl_mutex &lapb->lock pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &lapb->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock irq_context: 0 rtnl_mutex &lapb->lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: softirq rcu_callback rcu_read_lock rt6_exception_lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 batched_entropy_u16.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 
sb_writers#3 &sb->s_type->i_mutex_key#9 &p->alloc_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 &u->lock/1 irq_context: 0 &u->iolock &dir->lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: softirq rcu_callback &ul->lock irq_context: 0 &tty->legacy_mutex &tty->ldisc_sem irq_context: 0 &tty->legacy_mutex tasklist_lock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex tasklist_lock &sighand->siglock &tty->ctrl.lock irq_context: 0 &tty->ldisc_sem rcu_read_lock &tty->ctrl.lock irq_context: 0 &tty->ctrl.lock irq_context: 0 tasklist_lock rcu_read_lock &sighand->siglock irq_context: 0 tasklist_lock &sighand->siglock irq_context: 0 &tty->legacy_mutex &tty->ctrl.lock irq_context: 0 &tty->legacy_mutex &f->f_lock irq_context: 0 &tty->legacy_mutex &f->f_lock fasync_lock irq_context: 0 &tty->legacy_mutex &obj_hash[i].lock irq_context: 0 &tty->legacy_mutex pool_lock#2 irq_context: 0 rcu_read_lock &tty->ctrl.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &dentry->d_lock &wq#2 irq_context: 0 &port_lock_key irq_context: 0 &buf->lock irq_context: 0 &tty->ldisc_sem &port_lock_key irq_context: 0 &tty->ldisc_sem &port->lock irq_context: 0 &tty->ldisc_sem &tty->termios_rwsem &tty->ldisc_sem &tty->flow.lock irq_context: 0 rtnl_mutex lapb_list_lock &c->lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &net->packet.sklist_lock irq_context: 0 sk_lock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &po->bind_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock ptype_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_node_0 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 
0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_PACKET &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock ptype_lock irq_context: 0 slock-AF_PACKET irq_context: 0 sk_lock-AF_PACKET &mm->mmap_lock irq_context: 0 sk_lock-AF_PACKET fs_reclaim irq_context: 0 sk_lock-AF_PACKET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PACKET pool_lock#2 irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock irq_context: 0 sk_lock-AF_PACKET free_vmap_area_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PACKET vmap_area_lock irq_context: 0 sk_lock-AF_PACKET &____s->seqcount irq_context: 0 sk_lock-AF_PACKET &c->lock irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_PACKET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_PACKET pack_mutex irq_context: 0 sk_lock-AF_PACKET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_PACKET text_mutex irq_context: 0 sk_lock-AF_PACKET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_PACKET &fp->aux->used_maps_mutex irq_context: 0 rlock-AF_PACKET irq_context: 0 wlock-AF_PACKET irq_context: 0 &mm->mmap_lock &anon_vma->rwsem quarantine_lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &tty->termios_rwsem &tty->read_wait irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock (work_completion)(&buf->work) irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock irq_context: 0 &tty->ldisc_sem &ldata->atomic_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock pgd_lock irq_context: 0 rcu_read_lock key irq_context: 0 rcu_read_lock pcpu_lock irq_context: 0 rcu_read_lock percpu_counters_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &c->lock irq_context: softirq _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq _xmit_ETHER#2 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex 
&____s->seqcount irq_context: softirq &(&idev->mc_dad_work)->timer irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &pl->lock key#12 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex class irq_context: 0 rtnl_mutex (&tbl->proxy_timer) irq_context: softirq &(&idev->mc_ifc_work)->timer irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock &obj_hash[i].lock irq_context: softirq rcu_callback &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock krc.lock irq_context: 0 rtnl_mutex &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rt6_exception_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &c->lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &journal->j_list_lock &c->lock irq_context: 0 &journal->j_list_lock pool_lock#2 irq_context: 0 rcu_read_lock &base->lock irq_context: 0 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 fs_reclaim irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 sk_lock-AF_INET6 once_lock irq_context: 0 sk_lock-AF_INET6 once_lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &n->list_lock irq_context: 0 sk_lock-AF_PACKET &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 &net->packet.sklist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock ptype_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 fanout_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock irq_context: 
0 &sb->s_type->i_mutex_key#10 clock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq &(&ifa->dad_work)->timer irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock irq_context: softirq rcu_read_lock rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock once_lock irq_context: softirq rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work) kfence_freelist_lock 
irq_context: softirq _xmit_ETHER#2 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock kfence_freelist_lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &____s->seqcount irq_context: 0 &ep->mtx rcu_read_lock &sighand->signalfd_wqh irq_context: 0 &ep->mtx rcu_read_lock &ei->socket.wq.wait irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock pgd_lock irq_context: 0 &mm->mmap_lock key irq_context: 0 &mm->mmap_lock pcpu_lock irq_context: 0 &mm->mmap_lock percpu_counters_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 
&mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: hardirq &x->wait#12 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&dev->watchdog_timer) irq_context: softirq (&lapb->t1timer) irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock irq_context: softirq (&lapb->t1timer) &lapb->lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock batched_entropy_u8.lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock irq_context: softirq (&lapb->t1timer) &lapb->lock kfence_freelist_lock irq_context: softirq (&dev->watchdog_timer) &dev->tx_global_lock &base->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq (&lapb->t1timer) &lapb->lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &n->list_lock 
&c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rlock-AF_NETLINK irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &dentry->d_lock irq_context: 0 sb_writers#5 tomoyo_ss &c->lock irq_context: 0 sb_writers#5 &c->lock irq_context: 0 sb_writers#5 &____s->seqcount irq_context: 0 hostname_poll.wait.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start pgd_lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start rcu_read_lock pool_lock#2 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start key irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start pcpu_lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start percpu_counters_lock irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start pool_lock#2 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &c->lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq 
&(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)dm_bufio_cache irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) dm_bufio_clients_lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock irq_context: 0 (wq_completion)dm_bufio_cache (work_completion)(&(&dm_bufio_cleanup_old_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock once_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock &n->lock irq_context: softirq rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rlock-AF_NETLINK irq_context: softirq rcu_read_lock rcu_read_lock &dir->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock irq_context: 
softirq rcu_read_lock rcu_read_lock slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &queue->rskq_lock irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &____s->seqcount irq_context: 0 sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock irq_context: 0 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: 
&qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &sd->defer_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq &sd->defer_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &meta->lock irq_context: 0 sk_lock-AF_INET kfence_freelist_lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET irq_context: softirq (&icsk->icsk_retransmit_timer) irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock 
?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 kfence_freelist_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_INET &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET clock-AF_INET irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET elock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_callback uidhash_lock irq_context: softirq rcu_callback ucounts_lock irq_context: 0 sk_lock-AF_INET6 batched_entropy_u16.lock crngs.lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim irq_context: 0 &pipe->mutex/1 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: softirq rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu irq_context: 0 sk_lock-AF_INET remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET remove_cache_srcu &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock &lruvec->lru_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock pool_lock#2 irq_context: 0 &pipe->wr_wait irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 
softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock pool_lock#2 irq_context: 0 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->wr_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock irq_context: 0 &pipe->mutex/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page) irq_context: 0 &vma->vm_lock->lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 
&sbi->s_writepages_rwsem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &wb->work_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &wb->work_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &ei->i_completed_io_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ei->i_completed_io_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) 
&ext4__ioend_wq[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &ret->b_uptodate_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &wb->work_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: softirq &rq_wait->wait irq_context: softirq &rq_wait->wait &p->pi_lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_es_lock key#2 irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 key#12 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 key#13 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->work_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->work_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 lock pidmap_lock &n->list_lock irq_context: 0 lock pidmap_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &zone->lock &____s->seqcount irq_context: softirq (&dom->period_timer) &p->sequence key#13 irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock key#15 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &n->list_lock &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET slock-AF_INET tk_core.seq.seqcount irq_context: softirq slock-AF_INET tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 sk_lock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: softirq drivers/regulator/core.c:6262 irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/regulator/core.c:6262 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (regulator_init_complete_work).work irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->list_lock irq_context: 0 (wq_completion)events (regulator_init_complete_work).work &k->k_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq slock-AF_INET &obj_hash[i].lock irq_context: softirq slock-AF_INET &base->lock irq_context: softirq slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 
rcu_read_lock &stopper->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &stop_pi_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->wr_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 &mm->mmap_lock quarantine_lock irq_context: 0 &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &meta->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss irq_context: 0 sb_writers#4 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 elock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 quarantine_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET batched_entropy_u8.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET kfence_freelist_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &c->lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 quarantine_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu irq_context: 0 &vma->vm_lock->lock remove_cache_srcu quarantine_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &c->lock irq_context: 0 &vma->vm_lock->lock 
remove_cache_srcu &n->list_lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: softirq (&lapb->t1timer) &lapb->lock batched_entropy_u8.lock crngs.lock irq_context: softirq (&lapb->t1timer) &lapb->lock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 &rq->__lock &obj_hash[i].lock irq_context: 0 &rq->__lock &base->lock irq_context: 0 &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 lock#5 irq_context: softirq (&lapb->t1timer) &lapb->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &obj_hash[i].lock pool_lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sig->cred_guard_mutex &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 kn->active#47 fs_reclaim irq_context: 0 kn->active#47 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#47 &c->lock irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#47 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock khugepaged_mm_lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock irq_context: 0 &mm->mmap_lock khugepaged_wait.lock &p->pi_lock irq_context: 0 lock#3 &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 &rq->__lock irq_context: 0 lock#3 (work_completion)(work) irq_context: 0 rcu_read_lock &sighand->siglock irq_context: 0 rcu_read_lock &sighand->siglock pool_lock#2 irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock irq_context: 0 &futex_queues[i].lock irq_context: 0 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock irq_context: 0 rcu_read_lock &sighand->siglock kfence_freelist_lock irq_context: 0 &ep->mtx &ep->lock &ep->wq irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &lock->wait_lock irq_context: 0 &f->f_pos_lock 
&p->lock irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim irq_context: 0 &f->f_pos_lock &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock &p->lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock cpufreq_driver_lock irq_context: 0 &f->f_pos_lock &p->lock &rq->__lock irq_context: 0 &f->f_pos_lock &p->lock &mm->mmap_lock irq_context: 0 kn->active#4 &c->lock irq_context: 0 kn->active#4 &n->list_lock irq_context: 0 kn->active#4 &n->list_lock &c->lock irq_context: 0 &ep->mtx kn->active#4 fs_reclaim irq_context: 0 &ep->mtx kn->active#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx kn->active#4 pool_lock#2 irq_context: 0 &ep->mtx kn->active#4 &on->poll irq_context: 0 &f->f_pos_lock &p->lock &of->mutex irq_context: 0 &f->f_pos_lock &p->lock &of->mutex kn->active#4 param_lock irq_context: 0 &ep->mtx rcu_read_lock &on->poll irq_context: 0 kn->active#4 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock irq_context: 0 &f->f_pos_lock &p->lock &n->list_lock &c->lock irq_context: 0 &ep->mtx &obj_hash[i].lock pool_lock irq_context: 0 &ep->mtx kn->active#4 &c->lock irq_context: 0 kn->active#4 remove_cache_srcu irq_context: 0 kn->active#4 remove_cache_srcu quarantine_lock irq_context: 0 kn->active#4 remove_cache_srcu &c->lock irq_context: 0 kn->active#4 remove_cache_srcu &n->list_lock irq_context: 0 kn->active#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &p->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &p->lock pool_lock#2 irq_context: 0 &f->f_pos_lock &p->lock module_mutex irq_context: 0 sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET once_mutex irq_context: 0 sk_lock-AF_INET once_mutex crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 
softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &pipe->wr_wait irq_context: 0 rcu_read_lock tasklist_lock irq_context: 0 &ep->mtx rcu_read_lock &pipe->wr_wait irq_context: 0 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#9 irq_context: 0 sb_writers#9 &attr->mutex irq_context: 0 sb_writers#9 &attr->mutex &mm->mmap_lock irq_context: 0 sb_writers#3 &p->pi_lock irq_context: 0 sb_writers#3 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &s->s_inode_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->xattr_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates irq_context: 0 &type->s_umount_key#41/1 irq_context: 0 &type->s_umount_key#41/1 fs_reclaim irq_context: 0 &type->s_umount_key#41/1 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#41/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#41/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#41/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#41/1 sb_lock irq_context: 0 &type->s_umount_key#41/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 irq_context: 0 &type->s_umount_key#41/1 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->s_umount_key#41/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#41/1 &dentry->d_lock irq_context: 0 sb_writers#10 irq_context: 0 sb_writers#10 mount_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 
&root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq#2 irq_context: 0 kn->active#48 fs_reclaim irq_context: 0 kn->active#48 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#48 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#48 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#15 irq_context: 0 sb_writers#10 fs_reclaim irq_context: 0 sb_writers#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &mm->mmap_lock irq_context: 0 sb_writers#10 &of->mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#10 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 cgroup_mutex fs_reclaim irq_context: 0 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 cgroup_mutex css_set_lock cgroup_file_kn_lock irq_context: 0 &type->s_umount_key#42/1 irq_context: 0 &type->s_umount_key#42/1 fs_reclaim irq_context: 0 &type->s_umount_key#42/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#42/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#42/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#42/1 &c->lock irq_context: 0 &type->s_umount_key#42/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#42/1 sb_lock irq_context: 0 &type->s_umount_key#42/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem 
inode_hash_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#42/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#42/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 irq_context: 0 &type->s_umount_key#43 shrinker_rwsem irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 percpu_ref_switch_lock pool_lock#2 irq_context: 0 &type->s_umount_key#43 &root->kernfs_supers_rwsem irq_context: 0 &type->s_umount_key#43 rename_lock.seqcount irq_context: 0 &type->s_umount_key#43 &dentry->d_lock irq_context: 0 &type->s_umount_key#43 rcu_read_lock &dentry->d_lock irq_context: 0 &type->s_umount_key#43 &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#43 &xa->xa_lock#8 irq_context: 0 &type->s_umount_key#43 inode_hash_lock irq_context: 0 &type->s_umount_key#43 inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->s_umount_key#43 &obj_hash[i].lock irq_context: 0 &type->s_umount_key#43 pool_lock#2 irq_context: 0 &type->s_umount_key#43 &fsnotify_mark_srcu irq_context: 0 &type->s_umount_key#43 sb_lock irq_context: 0 sb_lock &obj_hash[i].lock pool_lock irq_context: 0 cgroup_mutex &n->list_lock &c->lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 cgroup_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq 
rcu_callback rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_callback rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) cgroup_mutex irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) percpu_ref_switch_lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&cgrp->bpf.release_work) rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) cgroup_mutex css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&css->destroy_work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) percpu_ref_switch_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &cgrp->pidlist_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (wq_completion)cgroup_pidlist_destroy irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) (work_completion)(&cgrp->release_agent_work) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock irq_context: 0 
(wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex css_set_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_rstat_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) pcpu_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) 
cgroup_mutex rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex pool_lock#2 irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex lock kernfs_idr_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#11 irq_context: 0 sb_writers#11 mount_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss tomoyo_policy_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem 
iattr_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_iattr_rwsem iattr_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 cgroup_mutex cpuset_mutex irq_context: 0 cgroup_mutex cpuset_mutex callback_lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &c->lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &pcp->lock &zone->lock irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &type->s_umount_key#42/1 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &type->s_umount_key#42/1 &n->list_lock irq_context: 0 &type->s_umount_key#42/1 &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#42/1 &rq->__lock irq_context: 0 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cgroup_mutex &dom->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &c->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_mutex irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex cpuset_mutex callback_lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex &dom->lock irq_context: 0 (wq_completion)cgroup_destroy (work_completion)(&(&css->destroy_rwork)->work) cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cgroup_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq#2 irq_context: 0 
kn->active#49 fs_reclaim irq_context: 0 kn->active#49 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#49 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#16 irq_context: 0 sb_writers#11 fs_reclaim irq_context: 0 sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &mm->mmap_lock irq_context: 0 sb_writers#11 &of->mutex irq_context: 0 sb_writers#11 &obj_hash[i].lock irq_context: 0 kn->active#50 fs_reclaim irq_context: 0 kn->active#50 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#50 &c->lock irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#50 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#50 cpu_hotplug_lock cpuset_mutex irq_context: 0 sb_writers#9 &mm->mmap_lock irq_context: 0 &type->s_umount_key#44 irq_context: 0 &type->s_umount_key#44 sb_lock irq_context: 0 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#17 namespace_sem mount_lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key#26 irq_context: 0 sb_writers#12 irq_context: 0 sb_writers#12 fs_reclaim irq_context: 0 sb_writers#12 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 &c->lock irq_context: 0 sb_writers#12 pool_lock#2 irq_context: 0 sb_writers#12 &mm->mmap_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rename_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pool_lock#2 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &dentry->d_lock &wq irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 
irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &s->s_inode_list_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 tk_core.seq.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pin_fs_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 sb_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 sb_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &type->s_umount_key#44 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mnt_id_ida.xa_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 mount_lock mount_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &obj_hash[i].lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 &sb->s_type->i_lock_key#26 &dentry->d_lock irq_context: 0 sb_writers#12 &sb->s_type->i_mutex_key#17 entries_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex irq_context: 0 rtnl_mutex dev_addr_sem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tn->lock irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx irq_context: 0 rtnl_mutex dev_addr_sem &sdata->sec_mtx &sec->lock irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim irq_context: 0 rtnl_mutex dev_addr_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex dev_addr_sem pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem rlock-AF_NETLINK irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock irq_context: 0 rtnl_mutex dev_addr_sem &pn->hash_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem input_pool.lock irq_context: 0 rtnl_mutex _xmit_IEEE802154 irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 rtnl_mutex proc_subdir_lock irq_context: 0 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_lock nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nl_table_wait.lock &p->pi_lock irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wb->list_lock irq_context: 0 &sbi->s_writepages_rwsem irq_context: 0 &sbi->s_writepages_rwsem &xa->xa_lock#8 irq_context: 0 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &pl->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &pl->lock key#12 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock 
&xa->xa_lock#8 &wb->work_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &wb->work_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_commit irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &iint->mutex sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#8 init_mm.page_table_lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 free_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 swap_cgroup_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 
&fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex percpu_ref_switch_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex swap_lock &p->lock#2 swap_avail_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 swapon_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#8 proc_poll_wait.lock irq_context: 0 swap_slots_cache_enable_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-down irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex cpuhp_state-up irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &x->wait#6 irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cpu_hotplug_lock cpuhp_state-up swap_slots_cache_mutex irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock irq_context: 0 swap_slots_cache_enable_mutex cpu_hotplug_lock cpuhp_state_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 swap_slots_cache_enable_mutex swap_lock irq_context: 0 &sighand->siglock rcu_read_lock &____s->seqcount#5 irq_context: 0 &sighand->siglock &prev->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock irq_context: softirq _xmit_ETHER#2 quarantine_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &sighand->siglock &c->lock irq_context: 0 
rcu_read_lock &sighand->siglock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: softirq _xmit_ETHER#2 &meta->lock irq_context: softirq _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock batched_entropy_u8.lock irq_context: 0 &vma->vm_lock->lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &____s->seqcount irq_context: 0 &vma->vm_lock->lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET quarantine_lock irq_context: 0 &rq->__lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_node_0 irq_context: softirq &(&wb->dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stopper->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fs_reclaim 
&rq->__lock irq_context: 0 lock#3 rcu_read_lock (wq_completion)mm_percpu_wq irq_context: 0 lock#3 &x->wait#10 irq_context: 0 lock#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem ptlock_ptr(page)#2 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page) lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: 0 &mm->mmap_lock &p->pi_lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &ep->mtx &mm->mmap_lock &p->pi_lock irq_context: 0 &ep->mtx &mm->mmap_lock &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &mm->mmap_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq &(&tbl->managed_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: softirq (&timer) irq_context: softirq (&timer) &obj_hash[i].lock irq_context: softirq (&timer) &base->lock irq_context: softirq (&timer) &base->lock 
&obj_hash[i].lock irq_context: softirq (&timer) rcu_read_lock pool_lock#2 irq_context: softirq (&timer) rcu_read_lock &c->lock irq_context: softirq (&timer) rcu_read_lock &____s->seqcount irq_context: softirq (&timer) &txlock irq_context: softirq (&timer) &txlock &list->lock#3 irq_context: softirq (&timer) &txwq irq_context: softirq (&timer) &txwq &p->pi_lock irq_context: softirq (&timer) &txwq &p->pi_lock &rq->__lock irq_context: softirq (&timer) &txwq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &list->lock#5 irq_context: softirq &list->lock#5 irq_context: 0 rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 &eql->queue.lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM (console_sem).lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM pool_lock#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 &lapbeth->up_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &rq_wait->wait irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &xa->xa_lock#8 &base->lock &obj_hash[i].lock irq_context: softirq &rq_wait->wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &meta->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/wireless/reg.c:236 irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:236 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem (&timer.timer) irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem fw_lock &x->wait#23 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem kernfs_idr_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem subsys mutex#80 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem deferred_probe_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem device_links_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events 
(work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) umhelper_sem gdp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) fw_lock irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx irq_context: 0 (wq_completion)events_power_efficient (reg_check_chans).work rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 inode_hash_lock &sb->s_type->i_lock_key#24 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&fw_work->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex irq_context: 0 
(wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &rq->__lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 batched_entropy_u8.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 kfence_freelist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &xa->xa_lock#8 &pl->lock irq_context: softirq rcu_read_lock &xa->xa_lock#8 &pl->lock key#12 irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#32 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: softirq net/wireless/reg.c:533 irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_indoor_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_requests_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex reg_pending_beacons_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)events_power_efficient 
(crda_timeout).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq &(&krcp->monitor_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/wireless/reg.c:533 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex &c->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#3 tomoyo_ss &rq->__lock irq_context: 0 lock#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock &base->lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &c->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 
softirq (&n->timer) irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) &n->lock &obj_hash[i].lock irq_context: softirq (&n->timer) &n->lock &base->lock irq_context: softirq (&n->timer) &n->lock &base->lock &obj_hash[i].lock irq_context: 0 &rq->__lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock irq_context: 0 &rq->__lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(&ovs_net->masks_rebalance)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &rq->__lock &base->lock irq_context: softirq rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: softirq &c->lock batched_entropy_u8.lock irq_context: softirq &c->lock kfence_freelist_lock irq_context: 0 tasklist_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 sb_writers#7 rcu_node_0 irq_context: 0 sb_writers#7 &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#7 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#7 &rq->__lock irq_context: 0 sb_writers#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#3 rcu_read_lock &rq->__lock irq_context: 0 lock#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: softirq mm/vmstat.c:2018 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &vma->vm_lock->lock pgd_lock irq_context: 0 &vma->vm_lock->lock key irq_context: 0 &vma->vm_lock->lock pcpu_lock irq_context: 0 &vma->vm_lock->lock percpu_counters_lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock irq_context: 0 rcu_read_lock &sighand->siglock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex 
&n->list_lock &c->lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events reg_work rtnl_mutex &meta->lock irq_context: 0 &ep->mtx fs_reclaim &rq->__lock irq_context: 0 sb_writers#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events_power_efficient (crda_timeout).work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&sk->sk_timer) irq_context: softirq (&sk->sk_timer) slock-AF_INET irq_context: softirq (&sk->sk_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&sk->sk_timer) slock-AF_INET &obj_hash[i].lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock irq_context: softirq (&sk->sk_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sighand->siglock quarantine_lock irq_context: 0 rcu_read_lock &sighand->siglock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &sighand->siglock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 irq_context: 0 &mm->mmap_lock &info->lock irq_context: 0 &mm->mmap_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock mount_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &wb->list_lock irq_context: 0 &mm->mmap_lock &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock sb_writers#5 mount_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock irq_context: 0 &mm->mmap_lock sb_writers#5 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#5 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#5 &sb->s_type->i_lock_key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &mm->mmap_lock sb_writers#5 &wb->list_lock &sb->s_type->i_lock_key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &ep->mtx &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle rcu_read_lock 
rcu_node_0 irq_context: 0 &sig->cred_guard_mutex sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss tomoyo_policy_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sem->wait_lock irq_context: 0 sb_writers#4 &p->pi_lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock pool_lock#2 irq_context: 0 &newf->file_lock &newf->resize_wait irq_context: 0 &kcov->lock irq_context: 0 &mm->mmap_lock &kcov->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &n->list_lock &c->lock irq_context: 0 &kcov->lock kcov_remote_lock irq_context: 0 &kcov->lock kcov_remote_lock pool_lock#2 irq_context: 0 pid_caches_mutex irq_context: 0 pid_caches_mutex slab_mutex irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim irq_context: 0 pid_caches_mutex slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pid_caches_mutex slab_mutex pool_lock#2 irq_context: 0 pid_caches_mutex slab_mutex &c->lock irq_context: 0 pid_caches_mutex slab_mutex &n->list_lock irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex irq_context: 0 pid_caches_mutex slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem irq_context: 0 pid_caches_mutex slab_mutex &k->list_lock irq_context: 0 pid_caches_mutex slab_mutex lock irq_context: 0 pid_caches_mutex slab_mutex lock kernfs_idr_lock irq_context: 0 pid_caches_mutex slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pid_caches_mutex slab_mutex &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#45 irq_context: 0 &type->s_umount_key#45 sb_lock irq_context: 0 &type->s_umount_key#45 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock irq_context: 0 
&sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#18 namespace_sem mount_lock &obj_hash[i].lock irq_context: 0 bt_proto_lock hci_sk_list.lock irq_context: 0 misc_mtx &base->lock irq_context: 0 misc_mtx &base->lock &obj_hash[i].lock irq_context: 0 (work_completion)(&(&data->open_timeout)->work) irq_context: 0 &data->open_mutex irq_context: 0 &data->open_mutex fs_reclaim irq_context: 0 &data->open_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex pool_lock#2 irq_context: 0 &data->open_mutex &____s->seqcount irq_context: 0 &data->open_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex &x->wait#9 irq_context: 0 &data->open_mutex hci_index_ida.xa_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &data->open_mutex wq_pool_mutex irq_context: 0 &data->open_mutex wq_pool_mutex &wq->mutex irq_context: 0 &data->open_mutex pin_fs_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 &data->open_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex irq_context: 0 &data->open_mutex gdp_mutex &k->list_lock irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim irq_context: 0 &data->open_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex gdp_mutex pool_lock#2 irq_context: 0 &data->open_mutex gdp_mutex &c->lock irq_context: 0 &data->open_mutex 
gdp_mutex lock irq_context: 0 &data->open_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex lock irq_context: 0 &data->open_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex bus_type_sem irq_context: 0 &data->open_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex &dev->power.lock irq_context: 0 &data->open_mutex dpm_list_mtx irq_context: 0 &data->open_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex &pcp->lock &zone->lock irq_context: 0 &data->open_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &data->open_mutex rcu_read_lock pool_lock#2 irq_context: 0 &data->open_mutex subsys mutex#81 irq_context: 0 &data->open_mutex subsys mutex#81 &k->k_lock irq_context: 0 &data->open_mutex &c->lock irq_context: 0 &data->open_mutex &dev->devres_lock irq_context: 0 &data->open_mutex triggers_list_lock irq_context: 0 &data->open_mutex leds_list_lock irq_context: 0 &data->open_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &k->list_lock 
irq_context: 0 &data->open_mutex rfkill_global_mutex lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex bus_type_sem irq_context: 0 &data->open_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 &data->open_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 &data->open_mutex rfkill_global_mutex dpm_list_mtx irq_context: 0 &data->open_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#41 irq_context: 0 &data->open_mutex rfkill_global_mutex subsys mutex#41 &k->k_lock irq_context: 0 &data->open_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex leds_list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rfkill_global_mutex &rq->__lock irq_context: 0 &data->open_mutex rfkill_global_mutex.wait_lock irq_context: 0 &data->open_mutex &p->pi_lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex &rq->__lock irq_context: 0 &data->open_mutex &rfkill->lock irq_context: 0 &data->open_mutex hci_dev_list_lock irq_context: 0 &data->open_mutex tk_core.seq.seqcount irq_context: 0 &data->open_mutex hci_sk_list.lock irq_context: 0 &data->open_mutex (pm_chain_head).rwsem irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &data->open_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) irq_context: 0 &data->open_mutex &list->lock#6 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 &data->open_mutex &data->read_wait irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 &list->lock#6 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#8 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &p->alloc_lock irq_context: 0 
sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 hci_dev_list_lock irq_context: 0 (wq_completion)hci0#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#7 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 &list->lock#7 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &list->lock#7 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &data->open_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#8 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#7 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 
(wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &list->lock#7 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) 
rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &data->open_mutex &n->list_lock irq_context: 0 &data->open_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock irq_context: 0 &data->open_mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 &data->open_mutex rfkill_global_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#8 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&pool->mayday_timer) &pool->lock/1 irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock irq_context: softirq (&pool->mayday_timer) &obj_hash[i].lock irq_context: softirq (&pool->mayday_timer) &base->lock irq_context: softirq (&pool->mayday_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#7 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) 
&c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &list->lock#7 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock kfence_freelist_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) 
&data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) pool_lock#2 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &hdev->req_lock irq_context: 0 &hdev->req_lock pool_lock#2 irq_context: 0 &hdev->req_lock &list->lock#8 irq_context: 0 &hdev->req_lock &list->lock#7 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) &c->lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) 
&____s->seqcount irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->power_on) pool_lock#2 irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock irq_context: 0 &hdev->req_lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem &sighand->siglock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) free_vmap_area_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) vmap_area_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) init_mm.page_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim 
irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#7 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 
irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) pool_lock#2 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->cmd_work) pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 &data->open_mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) 
&hdev->lock &base->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#8 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#7 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->tx_work) &list->lock#7 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI sock_cookie_ida.xa_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_HCI irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 hci_dev_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_BLUETOOTH irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_BLUETOOTH irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock 
irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 &sb->s_type->i_mutex_key#19 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#7 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem &rq->__lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci1 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock 
hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 namespace_sem mnt_id_ida.xa_lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) 
&hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock hci_cb_list_lock.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &c->lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->tx_work) &list->lock#7 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &list->lock#7 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock &undo_list->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3 
(work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &dentry->d_lock sysctl_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 bt_proto_lock &c->lock irq_context: 0 (wq_completion)hci4 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#8 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) 
&hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#7 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 &data->open_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &list->lock#7 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner 
&port_lock_key irq_context: 0 bt_proto_lock &n->list_lock irq_context: 0 bt_proto_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &list->lock#8 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#7 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &list->lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &list->lock#7 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 kcov_remote_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) (console_sem).lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) lock#6 &kcov->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &rq->__lock 
irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &c->lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) kfence_freelist_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &meta->lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->req_wait_q irq_context: 0 &hdev->req_lock &base->lock irq_context: 0 &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) 
rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#7 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci4 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &____s->seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock 
irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->tx_work) &list->lock#7 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount irq_context: 0 rtnl_mutex &nr_netdev_addr_lock_key irq_context: 0 &type->s_umount_key#23/1 &n->list_lock irq_context: 0 &type->s_umount_key#23/1 &n->list_lock &c->lock irq_context: 0 rtnl_mutex listen_lock irq_context: 0 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->req_wait_q &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock &____s->seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) 
fs_reclaim irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci5#2 
(work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#7 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci5 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &____s->seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci5#2 
(work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->tx_work) &list->lock#7 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 &pool->lock/1 &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &____s->seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock 
irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#7 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) 
&hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci2 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci2#2 (work_completion)(&hdev->tx_work) &list->lock#7 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&conn->pending_rx_work) 
&list->lock#10 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#3 irq_context: 0 &f->f_pos_lock sb_writers#3 sysctl_lock irq_context: 0 &f->f_pos_lock sb_writers#3 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#3 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &data->read_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem crngs.lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem fs_reclaim irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_subdir_lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock &c->lock irq_context: 0 pernet_ops_rwsem proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem &dir->lock irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-slock-AF_NETLINK irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem nl_table_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nl_table_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nl_table_wait.lock irq_context: 0 pernet_ops_rwsem &c->lock irq_context: 0 pernet_ops_rwsem &n->list_lock irq_context: 0 pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK 
rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex irq_context: 0 pernet_ops_rwsem nl_table_lock irq_context: 0 pernet_ops_rwsem &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem percpu_counters_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem cache_list_lock irq_context: 0 pernet_ops_rwsem tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem &k->list_lock irq_context: 0 pernet_ops_rwsem lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 
pernet_ops_rwsem &s->s_inode_list_lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_connlabels_lock irq_context: 0 pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 pernet_ops_rwsem nf_log_mutex irq_context: 0 pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem &this->receive_lock irq_context: 0 pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &hdev->req_lock hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) hci_sk_list.lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->power_on) &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &x->wait#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock lock kernfs_idr_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock bus_type_sem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock sysfs_symlink_target_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &root->kernfs_rwsem irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &dev->power.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock dpm_list_mtx irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)hci3#2 
(work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &k->k_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &list->lock#7 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->cmd_sync_work_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock irq_context: 0 (wq_completion)hci3 (work_completion)(&hdev->cmd_sync_work) &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock chan_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->ident_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->rx_work) &hdev->lock hci_cb_list_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#9 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#6 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &data->read_wait irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->tx_work) &list->lock#7 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&conn->pending_rx_work) &list->lock#10 irq_context: 0 pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &x->wait#9 
irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex input_pool.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 pernet_ops_rwsem devices_rwsem irq_context: 0 pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &c->lock 
irq_context: 0 pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock &c->lock irq_context: 0 pernet_ops_rwsem rdma_nets.xa_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex failover_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock 
_xmit_ETHER &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &p->pi_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem 
cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem wq_pool_mutex irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 pernet_ops_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &list->lock#4 irq_context: 0 pernet_ops_rwsem &dir->lock#2 irq_context: 0 pernet_ops_rwsem ptype_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem kfence_freelist_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC 
&rxnet->local_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &c->lock 
irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &local->services_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 pernet_ops_rwsem &call->waitq irq_context: 0 pernet_ops_rwsem &rx->call_lock irq_context: 0 pernet_ops_rwsem &rxnet->call_lock irq_context: 0 pernet_ops_rwsem net_rwsem irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock sysctl_lock irq_context: 0 sb_writers#3 &____s->seqcount#11 irq_context: 0 sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock irq_context: 0 sb_writers#3 &(&net->ipv4.ping_group_range.lock)->lock &____s->seqcount#11 irq_context: 0 misc_mtx &dir->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &n->list_lock &c->lock irq_context: 0 rtnl_mutex &r->consumer_lock irq_context: 0 rtnl_mutex &r->consumer_lock &r->producer_lock irq_context: 0 rtnl_mutex failover_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &mm->mmap_lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock irq_context: 0 rtnl_mutex &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex rcu_read_lock lock#8 irq_context: 0 rtnl_mutex rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &n->lock irq_context: 0 rtnl_mutex &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex &tbl->lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock 
irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock irq_context: 0 rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &dir->lock#2 irq_context: 0 rtnl_mutex &ndev->lock pcpu_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex pcpu_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ndev->lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 rtnl_mutex &br->hash_lock irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock &c->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex nf_hook_mutex irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim irq_context: 0 rtnl_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex nf_hook_mutex pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 rtnl_mutex nf_hook_mutex &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 rtnl_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex j1939_netdev_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key pool_lock#2 irq_context: softirq &(&idev->mc_ifc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 rtnl_mutex nf_hook_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq &(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex 
&zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock pgd_lock irq_context: 0 &mm->mmap_lock rcu_read_lock key irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock irq_context: 0 &mm->mmap_lock rcu_read_lock percpu_counters_lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 rtnl_mutex key#16 irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#17 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) key#18 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex kernfs_idr_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock 
irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wq->mutex irq_context: 0 rtnl_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex init_lock irq_context: 0 rtnl_mutex init_lock slab_mutex irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim irq_context: 0 rtnl_mutex init_lock slab_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock slab_mutex pool_lock#2 irq_context: 0 rtnl_mutex init_lock slab_mutex &c->lock irq_context: 0 rtnl_mutex init_lock slab_mutex &n->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex pcpu_alloc_mutex irq_context: 0 rtnl_mutex init_lock slab_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex init_lock slab_mutex &k->list_lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock irq_context: 0 rtnl_mutex init_lock slab_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex init_lock slab_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex init_lock slab_mutex &____s->seqcount irq_context: 0 rtnl_mutex init_lock fs_reclaim irq_context: 0 rtnl_mutex init_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex init_lock &zone->lock irq_context: 0 rtnl_mutex init_lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock &____s->seqcount irq_context: 0 rtnl_mutex init_lock pool_lock#2 irq_context: 0 rtnl_mutex init_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock &base->lock irq_context: 0 rtnl_mutex init_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex init_lock crngs.lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex net_rwsem &rq->__lock irq_context: 0 tomoyo_ss pgd_lock irq_context: 0 tomoyo_ss key irq_context: 0 tomoyo_ss pcpu_lock irq_context: 0 tomoyo_ss percpu_counters_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex deferred_lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events deferred_process_work irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 rtnl_mutex target_list_lock irq_context: 0 rtnl_mutex rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &br->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock irq_context: 0 rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &pn->hash_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex deferred_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond0 irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&slave->notify_work)->timer irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex lweventlist_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->mcast.work)->timer irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&ht->run_work) &ht->mutex &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key irq_context: 0 rtnl_mutex team->team_lock_key fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key &c->lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key &im->lock irq_context: 0 rtnl_mutex team->team_lock_key _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key lock irq_context: 0 rtnl_mutex team->team_lock_key lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key 
lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &meta->lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond0#2 irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#3 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond0#4 irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key irq_context: 0 
(wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex crngs.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 rtnl_mutex ptype_lock irq_context: softirq &(&bat_priv->orig_work)->timer irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) key#19 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu 
&rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#5 irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) 
fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &obj_hash[i].lock pool_lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work irq_context: 0 (wq_completion)events_power_efficient (gc_work).work tk_core.seq.seqcount irq_context: 0 (wq_completion)events_power_efficient (gc_work).work "ratelimiter_table_lock" irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: 0 rtnl_mutex _xmit_NONE irq_context: 0 rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex lock#9 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#2 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#2 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#2 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#2 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock 
irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#2 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#2 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#2 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#2 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#3 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#3 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 
&in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &rq->__lock irq_context: 0 (wq_completion)bond0#6 irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 irq_context: 0 rtnl_mutex team->team_lock_key#4 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#4 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#4 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#4 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex 
team->team_lock_key#4 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#4 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#4 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#4 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 irq_context: 0 rtnl_mutex team->team_lock_key#5 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#5 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#5 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#5 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#5 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 
nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#5 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex proc_inum_ida.xa_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 irq_context: 0 rtnl_mutex team->team_lock_key#6 fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#6 netpoll_srcu irq_context: 0 rtnl_mutex team->team_lock_key#6 net_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 net_rwsem &list->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 &tn->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#6 &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 input_pool.lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &im->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 cbs_list_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 sysfs_symlink_target_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lock kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &root->kernfs_rwsem irq_context: 0 rtnl_mutex 
team->team_lock_key#6 &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex team->team_lock_key#6 &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#6 lweventlist_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#6 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#6 (console_sem).lock irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#6 console_lock console_srcu console_owner console_owner_lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 quarantine_lock irq_context: softirq &(&hdev->cmd_timer)->timer irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &rq->__lock irq_context: 0 rtnl_mutex &hsr->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci0#2 
(work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pin_fs_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#6 &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 
(work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex lock kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex console_owner_lock irq_context: 0 rtnl_mutex console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) kfence_freelist_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci2#2 
(work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) (console_sem).lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_node_0 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq irq_context: 0 
(wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 rtnl_mutex gdp_mutex lock irq_context: 0 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 rtnl_mutex &k->k_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex 
rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 (console_sem).lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex mount_lock irq_context: 0 rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &n->list_lock &c->lock irq_context: 0 remove_cache_srcu rcu_node_0 irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex 
team->team_lock_key#5 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 fs_reclaim irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 net_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &tn->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 &n->list_lock irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) pool_lock#2 irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&app->join_timer) irq_context: softirq (&app->join_timer) &app->lock irq_context: softirq (&app->join_timer) &list->lock#11 irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock irq_context: softirq (&app->join_timer) &app->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer) &app->lock &base->lock irq_context: softirq (&app->join_timer) &app->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount irq_context: softirq (&app->join_timer)#2 irq_context: softirq (&app->join_timer)#2 &app->lock#2 irq_context: softirq (&app->join_timer)#2 &list->lock#12 irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock irq_context: softirq (&app->join_timer)#2 &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 &base->lock irq_context: softirq (&app->join_timer)#2 &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &xa->xa_lock#14 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_node_0 
irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/1 irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock irq_context: 0 rtnl_mutex rcu_read_lock &tap_major->minor_lock pool_lock#2 irq_context: 0 rtnl_mutex req_lock irq_context: 0 rtnl_mutex &x->wait#11 irq_context: 0 rtnl_mutex subsys mutex#82 irq_context: 0 rtnl_mutex subsys mutex#82 &k->k_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3 &____s->seqcount irq_context: 0 rtnl_mutex &xa->xa_lock#3 &c->lock irq_context: 0 kn->active#51 fs_reclaim irq_context: 0 kn->active#51 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#52 fs_reclaim irq_context: 0 kn->active#52 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#52 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock nsim_bus_dev_ids.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &sem->wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->list_lock 
irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex device_links_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fwnode_link_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex device_links_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &xa->xa_lock#15 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &xa->xa_lock#15 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key 
&sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock irq_context: 0 rtnl_mutex lweventlist_lock &n->list_lock &c->lock irq_context: 0 kn->active#51 &c->lock irq_context: 0 kn->active#51 &____s->seqcount irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#51 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &xa->xa_lock#3 &____s->seqcount irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock 
&data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &(&fn_net->fib_chain)->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_event_queue_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) 
&data->fib_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &xa->xa_lock#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex 
&root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key 
rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock (&timer.timer) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex deferred_probe_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex probe_waitqueue.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock subsys mutex#83 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 nsim_bus_dev_list_lock.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#51 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex devlinks.xa_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#2 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &xa->xa_lock#15 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 
mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 rtnl_mutex devnet_rename_sem irq_context: 0 rtnl_mutex devnet_rename_sem (console_sem).lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex devnet_rename_sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex devnet_rename_sem &rq->__lock irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer irq_context: 0 rtnl_mutex devnet_rename_sem pool_lock#2 irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex devnet_rename_sem &k->list_lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem 
&root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem kernfs_rename_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem kernfs_rename_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 rtnl_mutex devnet_rename_sem &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 rtnl_mutex &devlink_port->type_lock irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock krc.lock irq_context: 0 rtnl_mutex &nft_net->commit_mutex irq_context: 0 rtnl_mutex &ent->pde_unload_lock irq_context: 0 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &(&fn_net->fib_chain)->lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 stack_depot_init_mutex irq_context: 0 rtnl_mutex devnet_rename_sem batched_entropy_u8.lock irq_context: 0 rtnl_mutex devnet_rename_sem kfence_freelist_lock irq_context: 0 rtnl_mutex devnet_rename_sem &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) 
&hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex 
kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &xa->xa_lock#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex 
&rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex rcu_read_lock &rq->__lock irq_context: softirq &(&hwstats->traffic_dw)->timer irq_context: 
softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 fill_pool_map-wait-type-override pool_lock irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond0#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex 
&idev->mc_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &handshake->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &handshake->lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &table->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->endpoint_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#2 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#52 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 nl_table_wait.lock irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &xa->xa_lock#15 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 
tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#2 &devlink_port->type_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex 
&tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex devnet_rename_sem &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem &n->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem &n->list_lock &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem &____s->seqcount irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &(&fn_net->fib_chain)->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 
rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &xa->xa_lock#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex _xmit_SIT irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex fill_pool_map-wait-type-override 
&____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex devnet_rename_sem uevent_sock_mutex &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_wait.lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &base->lock irq_context: 0 rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock 
nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: softirq (&brmctx->ip6_own_query.timer) irq_context: softirq (&brmctx->ip6_own_query.timer) &br->multicast_lock irq_context: softirq (&brmctx->ip4_own_query.timer) irq_context: softirq (&brmctx->ip4_own_query.timer) &br->multicast_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &xa->xa_lock#15 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &base->lock &obj_hash[i].lock irq_context: 
0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex _xmit_TUNNEL irq_context: 0 cb_lock genl_mutex genl_mutex.wait_lock irq_context: 0 cb_lock genl_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_IPGRE irq_context: 0 rtnl_mutex _xmit_IPGRE &c->lock irq_context: 0 rtnl_mutex _xmit_IPGRE &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 
cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->static_identity.lock &rq->__lock irq_context: softirq (&in_dev->mr_ifc_timer) irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &ul->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#9 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex.wait_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock genl_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex.wait_lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock irq_context: 0 cb_lock &p->pi_lock irq_context: softirq (&in_dev->mr_ifc_timer) &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#3 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#3 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 
&pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 stack_depot_init_mutex irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 
rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &xa->xa_lock#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock 
rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&br->gc_work)->timer irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex _xmit_TUNNEL6 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 
nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#4 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#5 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &xa->xa_lock#15 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex 
&devlink->lock_key#5 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#4 &devlink_port->type_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 
&of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &xa->xa_lock#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex 
&k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex subsys 
mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 
sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock irq_context: 0 rtnl_mutex dev_addr_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bond->stats_lock/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)gid-cache-wq 
(work_completion)(&work->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 rtnl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#5 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount irq_context: softirq (&tun->flow_gc_timer) irq_context: softirq (&tun->flow_gc_timer) &tun->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 crngs.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 devlinks.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &xa->xa_lock#15 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &pcp->lock &zone->lock 
&____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 
batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &(&fn_net->fib_chain)->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 stack_depot_init_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#6 rtnl_mutex pin_fs_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &xa->xa_lock#3 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &base->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex 
(work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &devlink_port->type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex net_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &x->wait#9 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bus_type_sem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock 
&dev->mutex &devlink->lock_key#6 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dpm_list_mtx irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex subsys mutex#17 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &dir->lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_hotplug_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex dev_base_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex input_pool.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 
rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &tbl->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex sysctl_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex failover_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex proc_subdir_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &pnettable->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex smc_ib_devices.mutex irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &vn->sock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex 
&n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#5 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#5 &devlink_port->type_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex (inet6addr_validator_chain).rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 
pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock crngs.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock &n->list_lock &c->lock irq_context: softirq (&app->periodic_timer) irq_context: softirq (&app->periodic_timer) &app->lock irq_context: softirq (&app->periodic_timer) &app->lock &obj_hash[i].lock irq_context: softirq (&app->periodic_timer) &app->lock &base->lock irq_context: softirq (&app->periodic_timer) &app->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 
rtnl_mutex pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock remove_cache_srcu irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex bpf_devs_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#52 nsim_bus_dev_list_lock &dev->mutex &devlink->lock_key#6 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex dev_addr_sem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (check_lifetime_work).work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#6 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &devlink->lock_key#6 &devlink_port->type_lock irq_context: 0 rtnl_mutex dev_addr_sem &br->lock &br->hash_lock &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock 
fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock deferred_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock (console_sem).lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex &br->lock lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &br->lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &br->hash_lock irq_context: softirq rcu_read_lock &br->hash_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock &br->hash_lock nl_table_lock irq_context: softirq rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: softirq rcu_read_lock &br->multicast_lock irq_context: softirq rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock 
rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock &br->multicast_lock &dir->lock#2 irq_context: softirq rcu_read_lock &br->multicast_lock deferred_lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_lock irq_context: softirq rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: softirq rcu_read_lock &br->multicast_lock &base->lock irq_context: softirq rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex devnet_rename_sem remove_cache_srcu irq_context: 0 rtnl_mutex devnet_rename_sem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex devnet_rename_sem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex devnet_rename_sem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex devnet_rename_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex devnet_rename_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&pmctx->ip6_own_query.timer) irq_context: softirq (&pmctx->ip6_own_query.timer) &br->multicast_lock irq_context: softirq (&pmctx->ip4_own_query.timer) irq_context: softirq (&pmctx->ip4_own_query.timer) &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &nsim_dev->fa_cookie_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &____s->seqcount irq_context: 0 
rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock kfence_freelist_lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock &c->lock irq_context: softirq rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&idev->mc_dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &net->ipv6.fib6_gc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock deferred_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock 
rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#4 irq_context: 0 rtnl_mutex &ndev->lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#2 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&conn->info_timer)->timer irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&conn->info_timer)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&conn->info_timer)->work) &conn->chan_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock 
irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex _xmit_ETHER/1 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) 
&devlink->lock_key#6 &nsim_trap_data->trap_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ndev->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem batched_entropy_u8.lock irq_context: 0 rtnl_mutex dev_addr_sem kfence_freelist_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &meta->lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) kfence_freelist_lock irq_context: softirq rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &base->lock &obj_hash[i].lock irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)events 
(linkwatch_work).work rtnl_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &hsr->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock &base->lock &obj_hash[i].lock irq_context: softirq 
(&hsr->announce_timer) irq_context: softirq (&hsr->announce_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu 
&c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &n->list_lock &c->lock 
irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &x->wait#17 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem net_rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events 
(work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 rtnl_mutex &nn->netlink_tap_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem quarantine_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &br->hash_lock &c->lock irq_context: softirq rcu_read_lock &br->hash_lock &____s->seqcount irq_context: softirq rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem batched_entropy_u8.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex j1939_netdev_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock key#16 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 
0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex pgd_lock irq_context: 0 rtnl_mutex key irq_context: 0 rtnl_mutex percpu_counters_lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock quarantine_lock irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events deferred_process_work &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#6 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &ndev->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock 
irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem team->team_lock_key#3 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 
0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &meta->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock irq_context: softirq (&hsr->prune_timer) irq_context: softirq (&hsr->prune_timer) &hsr->list_lock irq_context: softirq (&hsr->prune_timer) &obj_hash[i].lock irq_context: softirq (&hsr->prune_timer) &base->lock irq_context: softirq (&hsr->prune_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock quarantine_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#6 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) pool_lock#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex _xmit_ETHER (console_sem).lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex _xmit_ETHER console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock irq_context: 0 rtnl_mutex dev_addr_sem &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &ipvlan->addrs_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock irq_context: 0 rtnl_mutex rcu_read_lock &ipvlan->addrs_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock &list->lock#13 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock 
&pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &list->lock#13 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu 
rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/1 irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex lweventlist_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: softirq rcu_read_lock &meta->lock irq_context: softirq rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount 
irq_context: 0 rtnl_mutex dev_addr_sem &hard_iface->bat_iv.ogm_buff_mutex irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex key#20 irq_context: 0 rtnl_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &dir->lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex k-slock-AF_INET irq_context: 0 rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &mm->mmap_lock fs_reclaim pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &meta->lock irq_context: 0 rtnl_mutex rcu_read_lock pgd_lock irq_context: 0 rtnl_mutex rcu_read_lock key irq_context: 0 rtnl_mutex rcu_read_lock pcpu_lock irq_context: 0 rtnl_mutex rcu_read_lock percpu_counters_lock 
irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh key#20 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-slock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim irq_context: 0 rtnl_mutex &wg->device_update_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &wg->device_update_lock &sb->s_type->i_lock_key#8 irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock &dir->lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 rtnl_mutex &wg->device_update_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-slock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock 
irq_context: 0 rtnl_mutex &wg->device_update_lock &wg->socket_update_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &wg->socket_update_lock &rq->__lock irq_context: softirq &(&bat_priv->tt.work)->timer irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &wg->device_update_lock &list->lock#14 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 
(wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &obj_hash[i].lock 
irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#21 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.req_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &bat_priv->tt.roam_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rlock-AF_NETLINK irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &wg->device_update_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex &wg->device_update_lock &c->lock irq_context: 0 rtnl_mutex 
&wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: softirq &keypair->receiving_counter.lock irq_context: softirq &peer->keypairs.keypair_update_lock irq_context: softirq &list->lock#14 irq_context: softirq rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_validator_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)wg-kex-wg2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2 
(work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 misc_mtx rfkill_global_mutex irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 
irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx pool_lock#2 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &rfkill->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock rcu_read_lock &____s->seqcount irq_context: 0 cb_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock genl_mutex hwsim_radio_lock irq_context: 0 cb_lock genl_mutex &x->wait#9 irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex gdp_mutex irq_context: 0 cb_lock genl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex subsys mutex#53 irq_context: 0 cb_lock genl_mutex subsys mutex#53 &k->k_lock irq_context: 0 cb_lock genl_mutex device_links_lock irq_context: 0 cb_lock genl_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex deferred_probe_mutex irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock 
genl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 cb_lock genl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex wq_pool_mutex irq_context: 0 cb_lock genl_mutex wq_pool_mutex &rq->__lock 
irq_context: 0 (wq_completion)wg-kex-wg2#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex wq_pool_mutex 
&wq->mutex irq_context: 0 cb_lock genl_mutex crngs.lock irq_context: 0 cb_lock genl_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex &zone->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 cb_lock genl_mutex &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex param_lock rate_ctrl_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex (console_sem).lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 cb_lock 
genl_mutex rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#54 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#54 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#3 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock 
&handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#3 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx reg_requests_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex dpm_list_mtx 
irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rfkill->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#41 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex subsys mutex#41 &k->k_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex triggers_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex leds_list_lock &led_cdev->trigger_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex 
rfkill_global_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex.wait_lock irq_context: 0 cb_lock genl_mutex pin_fs_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) irq_context: softirq (&ndev->rs_timer) &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) pool_lock#2 irq_context: softirq (&ndev->rs_timer) &dir->lock#2 irq_context: softirq (&ndev->rs_timer) &ul->lock#2 irq_context: softirq (&ndev->rs_timer) 
rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tn->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&rdev->wiphy.mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx failover_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_inum_ida.xa_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pnettable->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx smc_ib_devices.mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ndev->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock genl_mutex (inetaddr_chain).rwsem irq_context: 0 cb_lock genl_mutex inet6addr_chain.lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock genl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; 
__asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#3 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) 
&cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#3 
(work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned 
long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#4 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const 
void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 
0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned 
long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const 
void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount 
irq_context: 0 cb_lock rtnl_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &c->lock irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &____s->seqcount irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rdev->mgmt_registrations_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&sdata->dec_tailroom_needed_wk)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex 
&rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#8 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fsnotify_mark_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &s->s_inode_list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#8 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx mount_lock mount_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rdev->wiphy_work_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&dwork->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->color_collision_detect_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->chanctx_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx nl_table_wait.lock irq_context: 0 cb_lock rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem 
rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 rtnl_mutex _xmit_ETHER &local->filter_lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx rtnl_mutex.wait_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy3 irq_context: 0 (wq_completion)phy3 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy3 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&rdev->wiphy_work_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &list->lock#15 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx (console_sem).lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex _xmit_ETHER console_owner_lock irq_context: 0 rtnl_mutex _xmit_ETHER console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock 
&bat_priv->tt.last_changeset_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx console_owner irq_context: 0 (wq_completion)hci1#2 (work_completion)(&hdev->cmd_work) &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx 
fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &wdev->event_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 (wq_completion)cfg80211 
(work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &lock->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &lock->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->event_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx nl_table_wait.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &list->lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events wireless_nlevent_work irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem irq_context: 0 (wq_completion)events 
wireless_nlevent_work net_rwsem &list->lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem pool_lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem quarantine_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex hwsim_radio_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) &ndev->lock batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 irq_context: 0 (wq_completion)phy4 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy4 (work_completion)(&local->reconfig_filter) 
&local->filter_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &____s->seqcount irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_owner_lock irq_context: 0 rtnl_mutex dev_addr_sem _xmit_ETHER console_owner irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
_xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: softirq &list->lock#16 irq_context: 0 cb_lock rcu_read_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock irq_context: softirq (&lapb->t1timer) &lapb->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)phy5 irq_context: 0 (wq_completion)phy5 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy5 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock irq_context: softirq rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: softirq rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: softirq rcu_read_lock lock#6 irq_context: softirq rcu_read_lock lock#6 kcov_remote_lock irq_context: softirq rcu_read_lock &local->rx_path_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &list->lock#15 irq_context: softirq rcu_read_lock &local->rx_path_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock 
&pool->lock &p->pi_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock hwsim_radio_lock &c->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock irq_context: softirq rcu_read_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 kcov_remote_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 
(wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx pool_lock#2 irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 &type->s_umount_key#46/1 irq_context: 0 &type->s_umount_key#46/1 fs_reclaim irq_context: 0 &type->s_umount_key#46/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex irq_context: 0 &type->s_umount_key#46/1 pcpu_alloc_mutex pcpu_lock irq_context: 0 &type->s_umount_key#46/1 shrinker_rwsem irq_context: 0 &type->s_umount_key#46/1 list_lrus_mutex irq_context: 0 &type->s_umount_key#46/1 sb_lock irq_context: 0 &type->s_umount_key#46/1 sb_lock unnamed_dev_ida.xa_lock irq_context: 0 &type->s_umount_key#46/1 &c->lock irq_context: 0 
&type->s_umount_key#46/1 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#46/1 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex irq_context: 0 &type->s_umount_key#46/1 binderfs_minors_mutex binderfs_minors.xa_lock irq_context: 0 &type->s_umount_key#46/1 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 pool_lock#2 irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &dentry->d_lock &wq irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &sb->s_type->i_lock_key#32 &dentry->d_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 mmu_notifier_invalidate_range_start irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &s->s_inode_list_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 rcu_read_lock iunique_lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 tk_core.seq.seqcount irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &c->lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem &c->lock irq_context: 0 &type->i_mutex_dir_key#2 namespace_sem &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 rename_lock.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_rwsem irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 
cgroup_mutex kfence_freelist_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex css_set_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_file_kn_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex batched_entropy_u32.lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex task_group_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#6 irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &c->lock irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#6 &sb->s_type->i_lock_key#30 &dentry->d_lock &wq irq_context: 0 kn->active#53 fs_reclaim irq_context: 0 kn->active#53 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#53 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#10 &c->lock irq_context: 0 sb_writers#10 &____s->seqcount irq_context: 0 sb_writers#10 pool_lock#2 irq_context: 0 kn->active#54 fs_reclaim irq_context: 0 kn->active#54 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#54 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem 
cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &sb->s_type->i_lock_key#30 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &s->s_inode_list_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &xa->xa_lock#8 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &fsnotify_mark_srcu irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock 
cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock kernfs_notify_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events kernfs_notify_work &root->kernfs_supers_rwsem inode_hash_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->pi_lock &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 
rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex css_set_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cgroup_idr_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock cpuset_mutex callback_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock irq_context: 0 
sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex percpu_counters_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &pgdat->memcg_lru.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &lru->node[i].lock irq_context: 0 &type->i_mutex_dir_key#7 irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 
sb_writers#11 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: 0 kn->active#55 fs_reclaim irq_context: 0 kn->active#55 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock cgroup_file_kn_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->alloc_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &p->alloc_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex cpuset_attach_wq.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem cpuset_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &memcg->mm_list.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock &obj_hash[i].lock irq_context: 
0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem css_set_lock krc.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.waiters.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex (wq_completion)cpuset_migrate_mm irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &pool->lock/1 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#4 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &xa->xa_lock#4 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#4 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#4 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 kn->active#56 fs_reclaim irq_context: 0 kn->active#56 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#56 stock_lock irq_context: 0 kn->active#56 pool_lock#2 irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#56 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#57 fs_reclaim irq_context: 0 kn->active#57 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#57 stock_lock irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex kn->active#57 memcg_max_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#4 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &xa->xa_lock#4 pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &obj_hash[i].lock 
irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex devcgroup_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cpu_hotplug_lock freezer_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &ndev->lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &____s->seqcount#2 irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock irq_context: softirq rcu_callback cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq rcu_callback stock_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 stock_lock irq_context: 0 &type->i_mutex_dir_key#7 &c->lock irq_context: 0 &type->i_mutex_dir_key#7 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#4 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &xa->xa_lock#4 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem stock_lock irq_context: 0 kn->active#55 stock_lock irq_context: 0 kn->active#55 &c->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock rcu_read_lock &sighand->siglock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem freezer_mutex freezer_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &p->alloc_lock &newf->file_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 &xa->xa_lock#4 pool_lock#2 irq_context: 0 ebt_mutex &c->lock irq_context: 0 ebt_mutex &____s->seqcount#2 irq_context: 0 ebt_mutex &____s->seqcount irq_context: 0 ebt_mutex &rq->__lock irq_context: 0 ebt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_hook_mutex irq_context: 0 nf_hook_mutex fs_reclaim irq_context: 0 nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 nf_hook_mutex stock_lock irq_context: 0 nf_hook_mutex pool_lock#2 irq_context: 0 ebt_mutex &mm->mmap_lock irq_context: 0 nf_hook_mutex &c->lock irq_context: 0 nf_hook_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#20 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock stock_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &xt[i].mutex &mm->mmap_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock irq_context: 0 &xt[i].mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex free_vmap_area_lock pool_lock#2 irq_context: 0 &xt[i].mutex vmap_area_lock irq_context: 0 &xt[i].mutex &____s->seqcount irq_context: 0 &xt[i].mutex &per_cpu(xt_recseq, i) irq_context: 0 &xt[i].mutex &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock irq_context: 0 &xt[i].mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock &____s->seqcount#2 irq_context: 0 (wq_completion)phy6 irq_context: 0 (wq_completion)phy6 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy6 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 nf_nat_proto_mutex irq_context: 0 nf_nat_proto_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex pool_lock#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 nf_nat_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_nat_proto_mutex nf_hook_mutex stock_lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 nf_nat_proto_mutex cpu_hotplug_lock irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock irq_context: 0 nf_nat_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 nf_nat_proto_mutex stock_lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &xt[i].mutex &c->lock irq_context: 0 &xt[i].mutex &n->list_lock irq_context: 0 nf_hook_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 nf_hook_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 nf_nat_proto_mutex &c->lock irq_context: 0 nf_nat_proto_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex &____s->seqcount irq_context: 0 pcpu_alloc_mutex fs_reclaim irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pcpu_alloc_mutex &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 nf_hook_mutex rcu_read_lock pool_lock#2 irq_context: 0 nf_hook_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 elock-AF_INET6 irq_context: 0 &pipe->mutex/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#4 irq_context: 0 rtnl_mutex dev_addr_sem &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 
0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &xa->xa_lock#4 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle stock_lock irq_context: 0 loop_validate_mutex irq_context: 0 loop_validate_mutex &lo->lo_mutex irq_context: 0 &fsnotify_mark_srcu fs_reclaim irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &fsnotify_mark_srcu pool_lock#2 irq_context: 0 &fsnotify_mark_srcu &group->notification_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu 
&group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 &xt[i].mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 purge_vmap_area_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock irq_context: 0 &type->i_mutex_dir_key#4 &____s->seqcount#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem stock_lock irq_context: 0 sb_writers#8 tomoyo_ss &____s->seqcount#2 irq_context: 0 &p->lock &____s->seqcount#2 irq_context: 0 &group->notification_waitq &ep->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex irq_context: 0 &vma->vm_lock->lock stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#4 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#4 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 &r->consumer_lock irq_context: 0 &mm->mmap_lock stock_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 stock_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 
(work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex lweventlist_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &____s->seqcount#2 irq_context: 0 rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex net_rwsem nl_table_lock irq_context: 0 rtnl_mutex net_rwsem quarantine_lock irq_context: 0 rtnl_mutex net_rwsem nl_table_wait.lock irq_context: 0 rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) 
&wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#5 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 rtnl_mutex &ndev->lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) irq_context: softirq (&peer->timer_persistent_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) &list->lock#14 irq_context: softirq (&peer->timer_persistent_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#6 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; 
__asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#6 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount#2 irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &____s->seqcount irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount#2 irq_context: 0 &type->s_umount_key#46/1 &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 kn->active#53 &c->lock irq_context: 0 kn->active#53 &____s->seqcount#2 irq_context: 0 kn->active#53 &____s->seqcount irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_node_0 irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex dev_addr_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#57 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 kn->active#57 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex blkcg_pol_mutex &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rtnl_mutex &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock pool_lock#2 irq_context: 0 misc_mtx rfkill_global_mutex &data->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock 
irq_context: 0 cb_lock genl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#7 &root->kernfs_rwsem &____s->seqcount irq_context: 0 &xt[i].mutex init_mm.page_table_lock irq_context: 0 &xt[i].mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &c->lock irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 nf_nat_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &fsnotify_mark_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &____s->seqcount#2 irq_context: 0 &p->lock &of->mutex kn->active#5 &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
&hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while 
(0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 irq_context: 0 
(wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &sighand->siglock &p->pi_lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock 
&obj_hash[i].lock irq_context: 0 tasklist_lock &p->alloc_lock irq_context: 0 cb_lock genl_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock pgd_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock stock_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock key irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock pcpu_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock percpu_counters_lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock pcpu_lock stock_lock irq_context: 0 tasklist_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &s->s_inode_list_lock irq_context: 0 sb_writers#4 sb_internal irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &xa->xa_lock#8 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 sb_internal &obj_hash[i].lock irq_context: 0 sb_writers#4 inode_hash_lock irq_context: 0 sb_writers#4 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &fsnotify_mark_srcu irq_context: 0 sb_writers#4 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle 
&ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem 
&bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &xa->xa_lock#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 purge_vmap_area_lock &____s->seqcount irq_context: 0 purge_vmap_area_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override pool_lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#17 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 clock-AF_INET6 irq_context: 0 sb_writers#8 &xattrs->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &____s->seqcount 
irq_context: 0 nfnl_subsys_ipset fs_reclaim irq_context: 0 nfnl_subsys_ipset fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ipset pool_lock#2 irq_context: 0 nfnl_subsys_ipset &____s->seqcount irq_context: 0 nfnl_subsys_ipset stock_lock irq_context: 0 nfnl_subsys_ipset crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &q->instances_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex irq_context: 0 pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &xt[i].mutex &rq->__lock irq_context: 0 &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 sb_writers#8 
kn->active#5 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 nfnl_subsys_ipset &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &ndev->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &c->lock irq_context: softirq (&ndev->rs_timer) &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex stock_lock irq_context: 0 vsock_table_lock irq_context: 0 sk_lock-AF_VSOCK irq_context: 0 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 slock-AF_VSOCK irq_context: 0 cls_mod_lock irq_context: 0 sb_writers#8 tomoyo_ss pool_lock#2 irq_context: 0 &group->mark_mutex &____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex &____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rq->__lock irq_context: 0 
(wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETLINK &rq->__lock irq_context: 0 sk_lock-AF_NETLINK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset &c->lock irq_context: 0 nfnl_subsys_ipset &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_INET 
slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 alg_types_sem irq_context: 0 sk_lock-AF_ALG irq_context: 0 sk_lock-AF_ALG slock-AF_ALG irq_context: 0 slock-AF_ALG irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock irq_context: 0 rtnl_mutex dev_addr_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock once_lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &____s->seqcount#2 irq_context: 0 (wq_completion)phy7 irq_context: 0 (wq_completion)phy7 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy7 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock &pcp->lock &zone->lock irq_context: 0 cb_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) &n->list_lock irq_context: softirq (&ndev->rs_timer) &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK clock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rlock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_VSOCK irq_context: 0 &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 &f->f_pos_lock sb_writers#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &xa->xa_lock#4 irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &xa->xa_lock#4 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 &mm->mmap_lock sb_writers#4 mount_lock irq_context: 0 &mm->mmap_lock sb_writers#4 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_writers#4 pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 &mm->mmap_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 &sighand->siglock stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 sysctl_lock irq_context: 0 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 
lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &mapping->private_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 irq_context: 0 sb_writers#4 lock#4 irq_context: 0 sb_writers#4 lock#5 irq_context: 0 sb_writers#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 integrity_iint_lock irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#8 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex remove_cache_srcu irq_context: 0 cb_lock genl_mutex remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock console_owner_lock irq_context: 0 rtnl_mutex rcu_read_lock console_owner irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &____s->seqcount irq_context: softirq 
(&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock irq_context: 0 &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 
remove_cache_srcu &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &macsec_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 cb_lock rtnl_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock 
crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#8 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#9 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#8 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : 
"=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) 
+ 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock 
&pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)phy8 irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy8 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 rtnl_mutex &wg->device_update_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#10 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 
irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#10 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) 
*)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 
(work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 
(wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) 
+ 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg0#5 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ 
("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ 
do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 &mm->mmap_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_addr_sem rcu_read_lock console_owner_lock irq_context: 0 
rtnl_mutex dev_addr_sem rcu_read_lock console_owner irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &ifibss->incomplete_lock irq_context: softirq rcu_read_lock &rdev->wiphy_work_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->rx_path_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 irq_context: 0 (wq_completion)wg-kex-wg2#9 
(work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 cb_lock 
genl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &c->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 
(wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } 
while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#10 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do 
{ const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 
(work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 &type->s_umount_key#46/1 &sb->s_type->i_mutex_key#20 &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy9 irq_context: 0 (wq_completion)phy9 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy9 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock genl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex shrinker_rwsem &____s->seqcount irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 
irq_context: 0 kn->active#57 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 kn->active#57 &c->lock irq_context: 0 kn->active#57 &____s->seqcount#2 irq_context: 0 kn->active#57 &n->list_lock irq_context: 0 kn->active#57 &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 kn->active#55 &____s->seqcount irq_context: 0 kn->active#55 pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu irq_context: 0 &xt[i].mutex remove_cache_srcu quarantine_lock irq_context: 0 &xt[i].mutex remove_cache_srcu &c->lock irq_context: 0 &xt[i].mutex remove_cache_srcu &n->list_lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &xt[i].mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override pool_lock 
irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&pool->mayday_timer) &pool->lock irq_context: softirq (&pool->mayday_timer) &pool->lock wq_mayday_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 
(wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long 
__ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); 
}); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; 
} while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#12 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify 
= (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 irq_context: 0 (wq_completion)wg-crypt-wg0#6 &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &____s->seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rlock-AF_NETLINK irq_context: 0 misc_mtx &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock 
irq_context: 0 cb_lock genl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &cookie->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
&c->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-kex-wg2#12 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &r->consumer_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify 
= (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rq->__lock irq_context: 0 (wq_completion)phy11 irq_context: 0 (wq_completion)phy11 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy11 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx lweventlist_lock &c->lock irq_context: 0 (wq_completion)phy10 irq_context: 0 (wq_completion)phy10 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy10 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &lock->wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &p->pi_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &n->list_lock irq_context: 0 (wq_completion)events wireless_nlevent_work net_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 
(work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_ETHER &local->filter_lock &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 kn->active#53 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy12 irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy12 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &c->lock irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#5 &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex.wait_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 kn->active#55 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 kn->active#57 &____s->seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sem->wait_lock irq_context: 0 sb_writers#11 &sem->wait_lock irq_context: 0 sb_writers#11 &p->pi_lock irq_context: 0 sb_writers#11 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 &sem->wait_lock irq_context: 0 kn->active#55 &____s->seqcount#2 irq_context: 0 kn->active#55 &n->list_lock irq_context: 0 kn->active#55 &n->list_lock &c->lock irq_context: 0 nf_nat_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 nf_nat_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &c->lock irq_context: 0 nf_hook_mutex &rq->__lock irq_context: 0 nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &c->lock irq_context: 0 (wq_completion)phy13 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 (wq_completion)phy13 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy13 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock#2 irq_context: 0 nf_hook_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock 
irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &____s->seqcount irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 sk_lock-AF_NETLINK rcu_read_lock rhashtable_bucket irq_context: 0 &pipe->mutex/1 slock-AF_NETLINK irq_context: 0 &pipe->mutex/1 free_vmap_area_lock irq_context: 0 &pipe->mutex/1 vmap_area_lock irq_context: 0 &pipe->mutex/1 init_mm.page_table_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_nftables irq_context: 0 &pipe->mutex/1 nfnl_subsys_nftables &nft_net->commit_mutex irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex fs_reclaim irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex stock_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &c->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &____s->seqcount irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex pool_lock#2 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex batched_entropy_u32.lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex (console_sem).lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 
&nft_net->commit_mutex console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &n->list_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock 
&p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex rcu_read_lock &pool->lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &ht->mutex irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &ht->mutex &rq->__lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh key#20 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &entry->crc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 irq_context: 0 (wq_completion)phy14 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy14 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu pgd_lock irq_context: 0 &xt[i].mutex remove_cache_srcu stock_lock irq_context: 0 &xt[i].mutex remove_cache_srcu key irq_context: 0 &xt[i].mutex remove_cache_srcu pcpu_lock irq_context: 0 &xt[i].mutex remove_cache_srcu percpu_counters_lock irq_context: 0 &xt[i].mutex remove_cache_srcu pcpu_lock stock_lock irq_context: 0 &xt[i].mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex remove_cache_srcu irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex remove_cache_srcu quarantine_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6/1 cgroup_mutex rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &c->lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem 
&____s->seqcount#2 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &____s->seqcount irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#10 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#7 &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock irq_context: 0 nf_nat_proto_mutex &rq->__lock irq_context: 0 nf_nat_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &____s->seqcount#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &____s->seqcount#2 irq_context: 0 sb_writers#4 &rq->__lock irq_context: 0 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_persistent_keepalive) &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)wg-crypt-wg0 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG fs_reclaim irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback stock_lock rcu_read_lock 
per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG pool_lock#2 irq_context: 0 sk_lock-AF_ALG &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &dir->lock irq_context: 0 sk_lock-AF_ALG &rq->__lock irq_context: 0 sk_lock-AF_ALG &ei->socket.wq.wait irq_context: 0 &xt[i].mutex rcu_read_lock rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &p->alloc_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle key#4 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &dentry->d_lock irq_context: 0 sb_writers#4 rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &ei->xattr_sem irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 sb_writers#4 &iint->mutex irq_context: 0 sb_writers#4 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#4 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem 
pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &q->queue_lock 
&blkcg->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG (console_sem).lock irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_ALG console_lock console_srcu console_owner console_owner_lock irq_context: softirq (&ndev->rs_timer) 
rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 sk_lock-AF_ALG &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &c->lock irq_context: 0 sk_lock-AF_ALG &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &____s->seqcount irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG slock-AF_ALG &sk->sk_lock.wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) quarantine_lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 fs_reclaim irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 mapping.invalidate_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 pool_lock#2 
irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &xa->xa_lock#4 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &folio_wait_table[i] irq_context: 0 sb_writers#4 &rq->__lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 sb_writers#4 mount_lock irq_context: 0 sb_writers#4 sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_writers#4 pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ret->b_state_lock &journal->j_list_lock rcu_read_lock &xa->xa_lock#8 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 &f->f_pos_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle 
&journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 &xt[i].mutex pgd_lock irq_context: 0 &xt[i].mutex stock_lock irq_context: 0 &xt[i].mutex key irq_context: 0 &xt[i].mutex pcpu_lock irq_context: 0 &xt[i].mutex percpu_counters_lock irq_context: 0 &xt[i].mutex pcpu_lock stock_lock irq_context: 0 &ret->b_state_lock rcu_read_lock pool_lock#2 irq_context: 0 &ret->b_state_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem 
&mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &lock->wait_lock irq_context: 0 &disk->open_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex &____s->seqcount#2 irq_context: 0 sb_writers#4 tomoyo_ss &pcp->lock &zone->lock irq_context: 0 sb_writers#4 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 tomoyo_ss &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &n->list_lock irq_context: 0 sk_lock-AF_ALG &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 
jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit 
&p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: softirq rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_sockopt_mutex &rq->__lock irq_context: 0 nf_sockopt_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 &u->iolock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock irq_context: 0 &pipe->mutex/1 &sighand->siglock stock_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock pool_lock#2 irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock stock_lock irq_context: 0 tasklist_lock &sighand->siglock &obj_hash[i].lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &bgl->locks[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)rcu_gp &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &n->list_lock irq_context: 0 sb_writers#4 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock pool_lock irq_context: 0 cgroup_threadgroup_rwsem &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 kfence_freelist_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &rq->__lock irq_context: 0 sb_writers#4 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fill_pool_map-wait-type-override &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock quarantine_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &new_node->seq_out_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &____s->seqcount#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) kfence_freelist_lock irq_context: 0 sk_lock-AF_INET clock-AF_INET irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock 
irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock &____s->seqcount#2 irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock &____s->seqcount irq_context: 0 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &sctp_port_hashtable[i].lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET crngs.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET &asoc->wait irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET 
rcu_read_lock &____s->seqcount irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 &xa->xa_lock#8 &c->lock irq_context: 0 &xa->xa_lock#8 &____s->seqcount#2 irq_context: 0 &xa->xa_lock#8 &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 
(wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET slock-AF_INET &sk->sk_lock.wq irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex batched_entropy_u8.lock irq_context: 0 &xt[i].mutex kfence_freelist_lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock &meta->lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock kfence_freelist_lock irq_context: 0 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &obj_hash[i].lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: softirq rcu_callback key#22 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET sctp_assocs_id_lock pool_lock#2 irq_context: 0 &vma->vm_lock->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET &list->lock#18 irq_context: 0 slock-AF_INET &sk->sk_lock.wq irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC &rq->__lock irq_context: 0 sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &____s->seqcount#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC irq_context: 0 slock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock irq_context: 0 sk_lock-AF_TIPC fs_reclaim irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &c->lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock irq_context: 0 sk_lock-AF_TIPC &list->lock#19 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &list->lock#19 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &n->list_lock irq_context: 0 sk_lock-AF_TIPC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC clock-AF_TIPC irq_context: 0 
&sb->s_type->i_mutex_key#10 slock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rlock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &list->lock#17 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &list->lock#18 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_ep_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 clock-AF_INET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)rcu_gp 
(work_completion)(&sdp->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 sb_writers#4 sb_internal &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal &____s->seqcount irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) pool_lock#2 irq_context: 0 bt_proto_lock sco_sk_list.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_nftables &nft_net->commit_mutex irq_context: 0 &nft_net->commit_mutex irq_context: 0 &nft_net->commit_mutex fs_reclaim irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &nft_net->commit_mutex stock_lock irq_context: 0 &nft_net->commit_mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex &c->lock irq_context: 0 &nft_net->commit_mutex batched_entropy_u32.lock irq_context: 0 &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex &____s->seqcount#2 irq_context: 0 &nft_net->commit_mutex &____s->seqcount irq_context: 0 &nft_net->commit_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &nft_net->commit_mutex rcu_read_lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex nl_table_lock irq_context: 0 &nft_net->commit_mutex nl_table_wait.lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &nft_net->commit_mutex rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rlock-AF_NETLINK irq_context: 0 &nft_net->commit_mutex &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 
irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &hdev->lock irq_context: 0 &hdev->lock fs_reclaim irq_context: 0 &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &hdev->lock pool_lock#2 irq_context: 0 &hdev->lock &obj_hash[i].lock irq_context: 0 &hdev->lock &x->wait#9 irq_context: 0 &hdev->lock &c->lock irq_context: 0 &hdev->lock &list->lock#7 irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 
&obj_hash[i].lock irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &hdev->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock rcu_node_0 irq_context: 0 &hdev->lock &rq->__lock irq_context: 0 &hdev->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &n->list_lock irq_context: 0 (wq_completion)hci5#2 (work_completion)(&hdev->cmd_work) &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &hdev->lock rcu_read_lock rcu_node_0 irq_context: 0 &hdev->lock &obj_hash[i].lock pool_lock irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &conn->lock#2 irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &obj_hash[i].lock irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock irq_context: 0 &hdev->lock sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock &obj_hash[i].lock irq_context: 0 &hdev->lock slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &ei->socket.wq.wait irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 &iint->mutex remove_cache_srcu irq_context: 0 &iint->mutex remove_cache_srcu quarantine_lock irq_context: 0 &iint->mutex remove_cache_srcu &c->lock irq_context: 0 &iint->mutex remove_cache_srcu &n->list_lock irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#2 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 dup_mmap_sem &rq->__lock irq_context: 0 dup_mmap_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tvlv.container_list_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &msk->pm.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) irq_context: 0 
&sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&msk->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &mapping->private_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 
&nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex (work_completion)(&ht->run_work) irq_context: 0 &nft_net->commit_mutex &ht->mutex irq_context: 0 &nft_net->commit_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex stock_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 &group->mark_mutex &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: softirq (&timer) rcu_read_lock &n->list_lock irq_context: softirq (&timer) rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock 
&rq->__lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_SLIP#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock kfence_freelist_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key nr_node_list_lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key pool_lock#2 irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock_bh &nr_netdev_xmit_lock_key rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rcu_read_lock_bh _xmit_X25#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 
rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 file_rwsem &rq->__lock irq_context: 0 file_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 
&tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex stock_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex stock_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM slock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM wlock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &list->lock#20 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM nr_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM rlock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_NETROM irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)netns 
irq_context: 0 (wq_completion)netns net_cleanup_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem net_rwsem irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->nsid_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &tn->node_list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ebt_mutex irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_ct_ecache_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netns_bpf_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock hwsim_radio_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->cells_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->cells_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem bit_wait_table + i irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)afs irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &net->cells_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->fs_timer) irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &(&net->fs_lock)->lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->incoming_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &call->waitq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_read_lock &call->notify_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (rxrpc_call_limiter).lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->recvmsg_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rx->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->call_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC (&call->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &base->lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &list->lock#21 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)kafsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &local->services_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait irq_context: 0 &rxnet->local_mutex irq_context: 0 (&local->client_conn_reap_timer) irq_context: 0 &rxnet->conn_lock irq_context: 0 &table->hash[i].lock irq_context: 0 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 k-clock-AF_INET6 irq_context: 0 &list->lock#22 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#8 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex (work_completion)(&data->gc_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &srv->idr_lock irq_context: 0 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_TIPC irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ptype_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &h->lhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rlock-AF_RXRPC irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem loop_conns_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: hardirq rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 &child->perf_event_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&rxnet->service_conn_reap_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait#10 irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) irq_context: 0 (wq_completion)krxrpcd (work_completion)(&rxnet->service_conn_reaper) &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rxnet->conn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex bpf_devs_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tbl->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex class irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &tn->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_subdir_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock 
rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx 
&root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: softirq rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_callback pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#8 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 tasklist_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_base_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem lweventlist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem napi_hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &conn->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci5#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_SCO rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_SCO irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_BLUETOOTH irq_context: 0 
&sb->s_type->i_mutex_key#10 sco_sk_list.lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_PACKET &rnp->exp_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC irq_context: 0 nlk_cb_mutex-GENERIC &lock->wait_lock irq_context: 0 nlk_cb_mutex-GENERIC &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->dat.work)->timer irq_context: softirq (&peer->timer_retransmit_handshake) irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bat_priv->bla.work)->timer irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const 
void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &hash->list_locks[i] irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) key#20 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &entry->crc_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 
&rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work rcu_node_0 irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock irq_context: 0 (wq_completion)events (debug_obj_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock hidp_sk_list.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 hidp_sk_list.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&port->bc_work) &meta->lock irq_context: 0 (wq_completion)events_unbound 
(work_completion)(&port->bc_work) kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 ptlock_ptr(page)#2 ptlock_ptr(page)#2/1 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock 
&mm->mmap_lock/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key rcu_node_0 irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &root->kernfs_iattr_rwsem iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) stock_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 
sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock hci_dev_list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock (console_sem).lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->active_txq_lock[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &local->queue_stop_reason_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &fq->lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock pool_lock#2 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &list->lock#16 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock &list->lock#16 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock fs_reclaim irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock rlock-AF_BLUETOOTH irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &fn->fou_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle key#4 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->sync_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_radio_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hwsim_netgroup_ida.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
rdma_nets_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem devices_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &nlk->wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem wlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hn->hn_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)inet_frag_wq irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) 
irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnettable->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#17 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)netns 
net_cleanup_work pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_node_0 irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &peer->endpoint_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 
0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_TIPC &list->lock#23 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &list->lock#23 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) fs_reclaim irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv 
(work_completion)(&srv->awork) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC fs_reclaim irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &dir->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC batched_entropy_u32.lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &base->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 fs_reclaim irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &srv->idr_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &ei->socket.wq.wait irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC 
&base->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &xa->xa_lock#8 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&srv->awork) &fsnotify_mark_srcu irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-slock-AF_TIPC irq_context: 0 pernet_ops_rwsem fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &xa->xa_lock#3 &c->lock irq_context: 0 bt_proto_lock &obj_hash[i].lock pool_lock irq_context: 0 &hdev->lock &base->lock irq_context: 0 &hdev->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &trie->lock irq_context: 0 rcu_read_lock &trie->lock stock_lock irq_context: 0 rcu_read_lock &trie->lock pool_lock#2 irq_context: 0 rcu_read_lock &trie->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &trie->lock krc.lock irq_context: 0 rcu_read_lock &trie->lock &c->lock irq_context: 0 rcu_read_lock &trie->lock &____s->seqcount#2 irq_context: 0 rcu_read_lock &trie->lock &____s->seqcount irq_context: 0 rcu_read_lock &trie->lock &obj_hash[i].lock pool_lock irq_context: 0 &hdev->lock &conn->chan_lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &base->lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 chan_list_lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &conn->ident_lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 fs_reclaim irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 pool_lock#2 irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &list->lock#9 irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &rq->__lock irq_context: 0 &hdev->lock &conn->chan_lock &chan->lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock &trie->lock &n->list_lock irq_context: 0 rcu_read_lock &trie->lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &ei->socket.wq.wait irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &____s->seqcount irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &srv->idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &srv->idr_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &base->lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv 
(work_completion)(&con->rwork) &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &xa->xa_lock#8 irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &fsnotify_mark_srcu irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &con->outqueue_lock irq_context: 0 (wq_completion)tipc_rcv (work_completion)(&con->rwork) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &trie->lock krc.lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &trie->lock krc.lock &base->lock irq_context: 0 rcu_read_lock &trie->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_read_lock &trie->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &trie->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_read_lock &trie->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock &trie->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rcu_read_lock &trie->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock link_idr_lock irq_context: 0 lock link_idr_lock pool_lock#2 irq_context: 0 tracepoints_mutex irq_context: 0 tracepoints_mutex fs_reclaim irq_context: 0 tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tracepoints_mutex pool_lock#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 
tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 link_idr_lock irq_context: 0 fanout_mutex irq_context: 0 fanout_mutex &rq->__lock irq_context: 0 fanout_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fanout_mutex fs_reclaim irq_context: 0 fanout_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 fanout_mutex pool_lock#2 irq_context: 0 fanout_mutex &po->bind_lock irq_context: 0 fanout_mutex &po->bind_lock ptype_lock irq_context: 0 fanout_mutex &po->bind_lock &match->lock irq_context: 0 fanout_mutex &po->bind_lock &match->lock ptype_lock irq_context: 0 &match->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock &c->lock irq_context: 0 link_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 link_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 link_idr_lock &obj_hash[i].lock irq_context: 0 link_idr_lock pool_lock#2 irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &match->lock irq_context: 0 &u->iolock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &po->bind_lock &match->lock ptype_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &match->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex tracepoint_srcu_srcu_usage.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock irq_context: 0 map_idr_lock &obj_hash[i].lock irq_context: 0 map_idr_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: softirq (&sdp->delay_work) irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &obj_hash[i].lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock irq_context: softirq rcu_callback tracepoint_srcu_srcu_usage.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx stock_lock irq_context: 0 &ep->mtx rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx wakeup_ida.xa_lock irq_context: 0 &ep->mtx &x->wait#9 irq_context: 0 &ep->mtx &k->list_lock irq_context: 0 &ep->mtx gdp_mutex irq_context: 0 &ep->mtx gdp_mutex &k->list_lock irq_context: 0 &ep->mtx gdp_mutex fs_reclaim irq_context: 0 &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx gdp_mutex pool_lock#2 irq_context: 0 &ep->mtx gdp_mutex &c->lock irq_context: 0 &ep->mtx gdp_mutex lock 
irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 &ep->mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 &ep->mtx lock irq_context: 0 &ep->mtx lock kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &ep->mtx bus_type_sem irq_context: 0 &ep->mtx sysfs_symlink_target_lock irq_context: 0 &ep->mtx uevent_sock_mutex irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex pool_lock#2 irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_lock irq_context: 0 &ep->mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &ep->mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 &ep->mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &ep->mtx uevent_sock_mutex.wait_lock irq_context: 0 &ep->mtx &p->pi_lock irq_context: 0 &ep->mtx &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx subsys mutex#15 irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock irq_context: 0 &ep->mtx events_lock irq_context: 0 &ep->mtx &dentry->d_lock irq_context: 0 &ep->mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 &ep->mtx &u->lock irq_context: 0 &ep->mtx &u->lock &u->peer_wait irq_context: 0 &p->lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &ws->lock irq_context: 0 &ep->mtx &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ws->lock &obj_hash[i].lock irq_context: 0 &ep->mtx &ep->lock &ws->lock irq_context: 0 &ep->mtx &ep->lock &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ep->lock &ws->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: 0 &u->iolock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx quarantine_lock irq_context: 0 &ep->poll_wait irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &trie->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem 
__ip_vs_app_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults irq_context: 0 &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock irq_context: 0 &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock 
irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 l2cap_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 clock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 wlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 chan_list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 &u->lock &ei->socket.wq.wait &ep->lock irq_context: 0 &u->lock &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &ep->mtx &mm->mmap_lock &mapping->private_lock irq_context: 0 &ep->mtx &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&aux->work) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &ep->mtx &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx wakeup_srcu irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &x->wait#3 irq_context: 0 &ep->mtx (&ws->timer) irq_context: 0 
&ep->mtx &base->lock irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &ep->mtx &root->kernfs_rwsem pool_lock#2 irq_context: 0 &ep->mtx subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 &ep->mtx deferred_probe_mutex irq_context: 0 &ep->mtx device_links_lock irq_context: 0 &ep->mtx mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 &ep->mtx uevent_sock_mutex &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &dentry->d_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &lru->node[i].lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx deleted_ws.lock irq_context: 0 events_lock irq_context: 0 wakeup_srcu irq_context: 0 wakeup_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 wakeup_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &cfs_rq->removed.lock irq_context: 0 (&ws->timer) irq_context: 0 subsys mutex#15 irq_context: 0 subsys mutex#15 &k->k_lock irq_context: 0 subsys mutex#15 &k->k_lock klist_remove_lock irq_context: 0 uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 uevent_sock_mutex pool_lock#2 irq_context: 0 uevent_sock_mutex nl_table_lock irq_context: 0 uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex nl_table_wait.lock irq_context: 0 gdp_mutex sysfs_symlink_target_lock irq_context: 0 &ws->lock irq_context: 0 deleted_ws.lock irq_context: 0 wakeup_ida.xa_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 &xt[i].mutex &pcp->lock &zone->lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 sb_writers#4 &meta->lock irq_context: 0 sb_writers#4 kfence_freelist_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &(ei->i_block_reservation_lock) key#14 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &____s->seqcount#2 irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss quarantine_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#4 
tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN clock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAN irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock irq_context: 0 sb_writers#8 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#8 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback rlock-AF_CAN irq_context: softirq rcu_callback elock-AF_CAN irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex &____s->seqcount#2 irq_context: 0 &iint->mutex ima_extend_list_mutex &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)bat_events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &base->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 
&sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbinfo->stat_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &____s->seqcount#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &base->lock irq_context: 0 remove_cache_srcu &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 &ep->mtx uevent_sock_mutex &c->lock irq_context: 0 &ep->mtx rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &xa->xa_lock#8 &____s->seqcount irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &sem->wait_lock irq_context: 0 rcu_read_lock &trie->lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &xa->xa_lock#8 irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 
&sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 
&f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock#4 &lruvec->lru_lock irq_context: 0 rcu_read_lock &trie->lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock &trie->lock kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 
jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock stock_lock irq_context: 0 &mm->mmap_lock 
sb_pagefaults mapping.invalidate_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_wait_done_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock 
pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 
(wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_done_commit irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock 
irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 bpf_stats_enabled_mutex irq_context: 0 bpf_stats_enabled_mutex &newf->file_lock irq_context: 0 bpf_stats_enabled_mutex fs_reclaim irq_context: 0 bpf_stats_enabled_mutex fs_reclaim &rq->__lock irq_context: 0 bpf_stats_enabled_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex remove_cache_srcu pool_lock#2 irq_context: 0 bpf_stats_enabled_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_stats_enabled_mutex pool_lock#2 irq_context: 0 bpf_stats_enabled_mutex &xa->xa_lock#4 irq_context: 0 bpf_stats_enabled_mutex &obj_hash[i].lock irq_context: 0 bpf_stats_enabled_mutex stock_lock irq_context: 0 bpf_stats_enabled_mutex &sb->s_type->i_lock_key#15 irq_context: 0 bpf_stats_enabled_mutex &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle tk_core.seq.seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock 
irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle irq_context: 0 sb_writers#4 sb_internal &journal->j_wait_commit irq_context: 0 sb_writers#4 sb_internal &journal->j_wait_done_commit irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle bit_wait_table + i irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &xt[i].mutex &meta->lock irq_context: 0 &p->lock remove_cache_srcu pool_lock#2 irq_context: 0 kn->active#5 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->gssp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->gssp_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->gssp_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &hashinfo->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET6 irq_context: 0 (wq_completion)netns net_cleanup_work 
pernet_ops_rwsem nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ip6_fl_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem percpu_counters_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_app_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem __ip_vs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ipvs->est_mutex pool_lock#2 irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem recent_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem hashlimit_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem trans_gc_work irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_log_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &k->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sn->pipefs_sb_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem tcp_metrics_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 &ep->mtx &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-clock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
(work_completion)(&net->xfrm.policy_hash_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &list->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xa->xa_lock#3 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work stock_lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[1] irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i 
irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &ep->mtx &n->list_lock irq_context: 0 &ep->mtx &n->list_lock &c->lock irq_context: 0 &ep->mtx bus_type_sem &rq->__lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &x->wait#26 irq_context: 0 &f->f_pos_lock sb_writers#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &base->lock &obj_hash[i].lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_state_lock &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &journal->j_state_lock &journal->j_list_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 (&timer.timer) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 
&sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 bit_wait_table + i irq_context: softirq rcu_read_lock &xa->xa_lock#8 key#13 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &sem->wait_lock irq_context: 0 &ep->mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &type->i_mutex_dir_key#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#5 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &rcu_state.expedited_wq irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu irq_context: 0 &mm->mmap_lock sb_writers#4 
remove_cache_srcu &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock sb_pagefaults rcu_read_lock rcu_node_0 irq_context: 0 cgroup_threadgroup_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle batched_entropy_u32.lock crngs.lock irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem 
freezer_mutex rcu_read_lock rcu_node_0 irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex freezer_mutex.wait_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem/1 irq_context: 0 cb_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) 
rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 irq_context: 0 &f->f_pos_lock sb_writers#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 
&journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &fsnotify_mark_srcu rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#24 irq_context: softirq (&lapb->t1timer) &lapb->lock &list->lock#25 irq_context: softirq &list->lock#25 irq_context: softirq rcu_read_lock 
x25_neigh_list_lock irq_context: softirq rcu_read_lock &list->lock#26 irq_context: softirq rcu_read_lock x25_list_lock irq_context: softirq rcu_read_lock x25_forward_list_lock irq_context: 0 &ep->mtx quarantine_lock irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &ep->mtx remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults batched_entropy_u8.lock irq_context: 0 &mm->mmap_lock sb_pagefaults kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &meta->lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 
rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &fsnotify_mark_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx uevent_sock_mutex &____s->seqcount irq_context: 0 &ep->mtx uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 &ep->mtx &cfs_rq->removed.lock irq_context: 0 &journal->j_wait_done_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &c->lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ret->b_state_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle 
&ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &base->lock irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_send_keepalive) irq_context: softirq (&peer->timer_send_keepalive) pool_lock#2 irq_context: softirq (&peer->timer_send_keepalive) &list->lock#14 irq_context: softirq (&peer->timer_send_keepalive) tk_core.seq.seqcount irq_context: softirq (&peer->timer_send_keepalive) 
rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &sem->wait_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 kfence_freelist_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page) irq_context: 0 &sighand->siglock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex (console_sem).lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) quarantine_lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &base->lock irq_context: 0 cb_lock genl_mutex &base->lock &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &p->lock 
&of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex (&timer.timer) irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET elock-AF_INET irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bcm_notifier_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#27 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rose_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE wlock-AF_ROSE irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE &list->lock#27 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ROSE rlock-AF_ROSE irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 bcm_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAN irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &xa->xa_lock#8 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_es_lock key#6 irq_context: 0 sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock bit_wait_table + i irq_context: 0 &ep->mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock 
sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle hrtimer_bases.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 pidmap_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock 
pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock irq_context: 0 loop_validate_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 loop_validate_mutex loop_validate_mutex.wait_lock irq_context: 0 loop_validate_mutex &rq->__lock irq_context: 0 loop_validate_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 loop_validate_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &mapping->private_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu quarantine_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &c->lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &meta->lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock kfence_freelist_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 lock prog_idr_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex text_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 
cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rcu_read_lock fastopen_seqlock.seqcount irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rds_sock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 lock pidmap_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rlock-AF_BLUETOOTH irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock hci_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RDS irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_recv_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_monitor_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_cong_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rs->rs_rdma_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &q->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_sock_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rlock-AF_NETLINK irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &dir->lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] 
&nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 icmp_global.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 icmp_global.lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &p->lock fs_reclaim &rq->__lock irq_context: 0 &p->lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle key#4 irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pgd_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock key irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pcpu_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock percpu_counters_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 remove_cache_srcu pgd_lock irq_context: 0 remove_cache_srcu key irq_context: 0 remove_cache_srcu pcpu_lock irq_context: 0 remove_cache_srcu percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock pool_lock irq_context: softirq (&p->forward_delay_timer) irq_context: softirq (&p->forward_delay_timer) &br->lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock pool_lock#2 irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock nl_table_wait.lock irq_context: softirq (&p->forward_delay_timer) &br->lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock irq_context: 0 ebt_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem &data->nh_lock irq_context: 0 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem &data->nh_lock fs_reclaim irq_context: 0 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem &data->nh_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem &data->nh_lock pool_lock#2 irq_context: 0 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem &data->nh_lock rcu_read_lock rhashtable_bucket irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy15 irq_context: 0 (wq_completion)phy15 (work_completion)(&local->reconfig_filter) irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy15 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 pfkey_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (kmod_concurrent_max).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &x->wait#17 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#15 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu irq_context: 0 &group->mark_mutex remove_cache_srcu quarantine_lock irq_context: 0 &group->mark_mutex remove_cache_srcu &c->lock irq_context: 0 &group->mark_mutex remove_cache_srcu &n->list_lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &group->mark_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex running_helpers_waitq.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex xfrm_state_gc_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rlock-AF_KEY irq_context: 0 &sb->s_type->i_mutex_key#10 pfkey_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_KEY irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_KEY irq_context: 0 (wq_completion)events xfrm_state_gc_work irq_context: 0 (wq_completion)events xfrm_state_gc_work xfrm_state_gc_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &rnp->exp_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_KEY irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &p->pi_lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events xfrm_state_gc_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work (&x->rtimer) irq_context: 0 (wq_completion)events xfrm_state_gc_work &base->lock irq_context: 0 (wq_completion)events xfrm_state_gc_work pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 lock#5 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_data_sem &c->lock irq_context: 0 cb_lock batched_entropy_u8.lock irq_context: 0 cb_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock quarantine_lock irq_context: 0 rcu_read_lock &ul->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock &(&bp->lock)->lock irq_context: 0 rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock &q->lock#2 irq_context: 0 rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock &q->lock#2 pool_lock#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 &u->bindlock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i] &net->unx.table.locks[i]/1 &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &net->unx.table.locks[i]/1 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &u->bindlock &bsd_socket_locks[i] irq_context: 0 &u->iolock stock_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 &u->iolock &mm->mmap_lock &rq->__lock irq_context: 0 &u->iolock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#8 tomoyo_ss quarantine_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mm->context.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#5 &lruvec->lru_lock irq_context: 0 tracepoints_mutex tasklist_lock irq_context: 0 rcu_read_lock &sighand->siglock stock_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock 
&obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex &n->list_lock irq_context: 0 &nft_net->commit_mutex &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock irq_context: 0 rcu_read_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 rcu_read_lock &sighand->siglock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss stock_lock irq_context: 0 tomoyo_ss pcpu_lock stock_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &mapping->private_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &hashinfo->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &bsd_socket_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &dentry->d_lock &lru->node[i].lock 
irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 clock-AF_LLC irq_context: 0 (wq_completion)netns net_cleanup_work &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC irq_context: 0 sk_lock-AF_LLC slock-AF_LLC irq_context: 0 sk_lock-AF_LLC fs_reclaim irq_context: 0 sk_lock-AF_LLC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_LLC pool_lock#2 irq_context: 0 sk_lock-AF_LLC &dir->lock#2 irq_context: 0 sk_lock-AF_LLC &sap->sk_lock irq_context: 0 slock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC slock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_LLC &sap->sk_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 &dir->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->pf_cycle_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->ack_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->rej_sent_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (&llc->busy_state_timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_LLC irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#28 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &____s->seqcount#2 irq_context: 0 &nft_net->commit_mutex rcu_read_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem 
rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->waiters irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rsp->gp_wait pool_lock#2 irq_context: softirq rcu_callback &rsp->gp_wait irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &f->f_pos_lock &mm->mmap_lock &rq->__lock irq_context: 0 &f->f_pos_lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex &c->lock irq_context: 0 tracepoints_mutex &rq->__lock irq_context: 0 tracepoints_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &sighand->siglock &sighand->signalfd_wqh irq_context: 0 rcu_read_lock &sighand->siglock &sighand->signalfd_wqh &ep->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) tracepoint_srcu_srcu_usage.lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &base->lock irq_context: 0 &mm->mmap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: softirq (&wq_watchdog_timer) &obj_hash[i].lock irq_context: softirq (&wq_watchdog_timer) &base->lock irq_context: softirq (&wq_watchdog_timer) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock &lock->wait_lock irq_context: 0 &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 irq_context: 
0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#14 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#8 stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#8 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#8 &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#8 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle 
&sbi->s_orphan_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &journal->j_list_lock irq_context: 0 sb_writers#4 lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock lock#5 &lruvec->lru_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &sig->cred_guard_mutex 
remove_cache_srcu &rq->__lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle bit_wait_table + i irq_context: 0 rtnl_mutex lapb_list_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex lapb_list_lock kfence_freelist_lock irq_context: 0 &u->iolock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &rq->__lock irq_context: 0 sk_lock-AF_PACKET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rnp->exp_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 quarantine_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_reserved irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &ext4__ioend_wq[i] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 uts_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &mm->mmap_lock &sem->wait_lock irq_context: 0 &u->iolock &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &u->iolock &mm->mmap_lock &obj_hash[i].lock irq_context: 0 &u->iolock &mm->mmap_lock pool_lock#2 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex &ACCESS_PRIVATE(sdp, lock) irq_context: 0 tracepoints_mutex tracepoint_srcu irq_context: 0 tracepoints_mutex &x->wait#3 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &base->lock &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 
&sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock irq_context: 0 &iint->mutex ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 
sb_writers#4 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock icmp_global.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock icmp_global.lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 remove_cache_srcu &meta->lock irq_context: 0 remove_cache_srcu kfence_freelist_lock irq_context: 0 sb_writers#5 lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 
nf_conntrack_mutex &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 tracepoints_mutex tracepoints_mutex.wait_lock irq_context: 0 tracepoints_mutex.wait_lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock pcpu_lock stock_lock irq_context: 0 kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 isotp_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 isotp_notifier_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 
&n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &n->list_lock &c->lock irq_context: 0 sb_writers#5 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex &cfs_rq->removed.lock irq_context: 0 &group->mark_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 stock_lock irq_context: 0 sk_lock-AF_INET6 mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET6 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &dir->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 crngs.lock irq_context: 0 sk_lock-AF_INET6 &token_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 
sk_lock-AF_INET6 k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock &meta->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &p->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 
&hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)krdsd irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 k-sk_lock-AF_INET6 cpu_hotplug_lock irq_context: 0 k-sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex irq_context: 0 k-sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 k-sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 k-sk_lock-AF_INET6 (console_sem).lock irq_context: 0 k-sk_lock-AF_INET6 console_lock console_srcu console_owner_lock irq_context: 0 k-sk_lock-AF_INET6 console_lock console_srcu console_owner irq_context: 0 k-sk_lock-AF_INET6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 k-sk_lock-AF_INET6 console_lock console_srcu console_owner console_owner_lock irq_context: 0 k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-slock-AF_INET6 
irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &xa->xa_lock#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &fsnotify_mark_srcu irq_context: 0 k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 k-sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET6 irq_context: 0 k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 k-sk_lock-AF_INET6 &base->lock irq_context: 0 k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock 
rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 elock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 &____s->seqcount irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 
0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &token_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 &u->iolock &pcp->lock &zone->lock irq_context: 0 &u->iolock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 
rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (debug_obj_work).work &meta->lock irq_context: 0 (wq_completion)events (debug_obj_work).work kfence_freelist_lock irq_context: 0 rcu_read_lock stock_lock irq_context: 0 rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 k-clock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &n->list_lock irq_context: 0 cb_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &sem->wait_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &xa->xa_lock#8 pool_lock#2 irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &sb->s_type->i_lock_key#22 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &bgl->locks[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6 remove_cache_srcu pool_lock#2 irq_context: 0 &fsnotify_mark_srcu &____s->seqcount#2 irq_context: 0 &fsnotify_mark_srcu &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &n->list_lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 k-sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &xt[i].mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 
pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#4 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[3] irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex irq_context: 0 event_mutex sched_register_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex pool_lock#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex sched_register_mutex tracepoints_mutex &c->lock irq_context: 0 event_mutex tracepoints_mutex irq_context: 0 event_mutex tracepoints_mutex fs_reclaim irq_context: 0 event_mutex tracepoints_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 event_mutex tracepoints_mutex pool_lock#2 irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock static_call_mutex 
text_mutex ptlock_ptr(page)#2 irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 event_mutex tracepoints_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &mm->mmap_lock &xa->xa_lock#4 irq_context: 0 &mm->mmap_lock &xa->xa_lock#4 pool_lock#2 irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key irq_context: 0 &mm->mmap_lock &s->s_inode_list_lock irq_context: 0 &mm->mmap_lock batched_entropy_u32.lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key &dentry->d_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock link_idr_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 &c->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 &n->list_lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#6 irq_context: 0 lock btf_idr_lock irq_context: 0 lock btf_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &tun->lock irq_context: 0 rtnl_mutex wlock-AF_UNSPEC irq_context: 0 rtnl_mutex elock-AF_UNSPEC irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 rtnl_mutex bpf_devs_lock irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 rtnl_mutex &ul->lock irq_context: 0 rtnl_mutex &net->xdp.lock irq_context: 0 rtnl_mutex mirred_list_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock 
irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_query_lock irq_context: 0 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 rtnl_mutex &idev->mc_report_lock irq_context: 0 rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock irq_context: 0 rtnl_mutex &pnn->routes.lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 rtnl_mutex deferred_probe_mutex irq_context: 0 rtnl_mutex device_links_lock irq_context: 0 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 sb_writers#4 sb_internal &n->list_lock irq_context: 0 sb_writers#4 sb_internal &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&hctx->run_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_state.barrier_mutex.wait_lock irq_context: 0 dev_base_lock irq_context: 0 lweventlist_lock irq_context: 0 &tun->lock irq_context: 0 krc.lock irq_context: 0 &dir->lock#2 irq_context: 0 &dir->lock#2 &obj_hash[i].lock irq_context: 0 &dir->lock#2 pool_lock#2 irq_context: 0 netdev_unregistering_wq.lock irq_context: softirq &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 misc_mtx remove_cache_srcu irq_context: 0 misc_mtx remove_cache_srcu quarantine_lock irq_context: 0 misc_mtx remove_cache_srcu &c->lock irq_context: 0 misc_mtx remove_cache_srcu &n->list_lock irq_context: 0 misc_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 misc_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 misc_mtx remove_cache_srcu &rq->__lock irq_context: 0 misc_mtx 
remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 event_mutex &rq->__lock irq_context: 0 event_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex qdisc_mod_lock irq_context: 0 rtnl_mutex &block->lock irq_context: 0 rtnl_mutex &block->cb_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 &p->lock &mm->mmap_lock &rq->__lock irq_context: 0 &p->lock &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 &n->list_lock &c->lock irq_context: 0 &type->i_mutex_dir_key#7 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#58 fs_reclaim irq_context: 0 kn->active#58 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#58 stock_lock irq_context: 0 kn->active#58 
&kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#58 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#58 &c->lock irq_context: 0 sb_writers#11 stock_lock irq_context: 0 sb_writers#11 &c->lock irq_context: 0 sb_writers#11 &n->list_lock irq_context: 0 sb_writers#11 &n->list_lock &c->lock irq_context: 0 sb_writers#11 &rq->__lock irq_context: 0 sb_writers#11 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 pool_lock#2 irq_context: 0 sb_writers#11 &____s->seqcount irq_context: 0 sb_writers#11 &p->lock irq_context: 0 sb_writers#11 &p->lock fs_reclaim irq_context: 0 sb_writers#11 &p->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &p->lock stock_lock irq_context: 0 sb_writers#11 &p->lock &c->lock irq_context: 0 sb_writers#11 &p->lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &p->lock &____s->seqcount irq_context: 0 sb_writers#11 &p->lock pool_lock#2 irq_context: 0 sb_writers#11 &p->lock &n->list_lock irq_context: 0 sb_writers#11 &p->lock &n->list_lock &c->lock irq_context: 0 sb_writers#11 &p->lock &rq->__lock irq_context: 0 sb_writers#11 &p->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &p->lock &of->mutex irq_context: 0 sb_writers#11 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#11 tk_core.seq.seqcount irq_context: 0 sb_writers#11 sb_writers#11 mount_lock irq_context: 0 sb_writers#11 sb_writers#11 tk_core.seq.seqcount irq_context: 0 sb_writers#11 sb_writers#11 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 sb_writers#11 &wb->list_lock irq_context: 0 sb_writers#11 sb_writers#11 &wb->list_lock &sb->s_type->i_lock_key#31 irq_context: 0 rtnl_mutex &block->lock fs_reclaim irq_context: 0 rtnl_mutex &block->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &block->lock &c->lock irq_context: 0 rtnl_mutex &block->lock pool_lock#2 irq_context: 0 rtnl_mutex &chain->filter_chain_lock irq_context: 0 rtnl_mutex cls_mod_lock irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->lock irq_context: 0 rtnl_mutex act_mod_lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock pool_lock#2 irq_context: 0 rtnl_mutex &p->tcfa_lock irq_context: 0 rtnl_mutex &p->tcfa_lock &(to_police(*a)->tcfp_lock) irq_context: 0 rtnl_mutex &p->tcfa_lock &(to_police(*a)->tcfp_lock) tk_core.seq.seqcount irq_context: 0 rtnl_mutex &tn->idrinfo->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &chain->filter_chain_lock &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->proto_destroy_lock irq_context: 0 rtnl_mutex &block->lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &block->lock &____s->seqcount irq_context: 0 rtnl_mutex &block->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &block->lock nl_table_lock irq_context: 0 rtnl_mutex &block->lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex 
sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 &p->lock batched_entropy_u8.lock irq_context: 0 &p->lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 lweventlist_lock pool_lock#2 irq_context: 0 lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 rcu_node_0 irq_context: softirq &(&ssp->srcu_sup->work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 
sb_writers#4 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock irq_context: 0 sk_lock-AF_INET6 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &____s->seqcount irq_context: 0 sk_lock-AF_INET6 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_port_hashtable[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#8 irq_context: 0 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#8 pool_lock#2 irq_context: softirq rcu_callback &dir->lock &obj_hash[i].lock irq_context: softirq rcu_callback &dir->lock pool_lock#2 irq_context: 0 &info->lock irq_context: 0 &net->xdp.lock irq_context: 0 &xs->mutex irq_context: 0 &xs->mutex fs_reclaim irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xs->mutex pool_lock#2 irq_context: 0 &xs->mutex free_vmap_area_lock irq_context: 0 &xs->mutex vmap_area_lock irq_context: 0 &xs->mutex &____s->seqcount irq_context: 0 &xs->mutex init_mm.page_table_lock irq_context: 0 &xs->mutex &c->lock irq_context: 0 &xs->mutex &n->list_lock irq_context: 0 &xs->mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock vmap_area_lock irq_context: 0 pernet_ops_rwsem nl_table_lock nl_table_wait.lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xdp.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->map_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 purge_vmap_area_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 
clock-AF_XDP irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 
sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 l2tp_ip6_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip6_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem stock_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: softirq (&sdp->delay_work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (kmod_concurrent_max).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &x->wait#17 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim 
irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &wb->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex tcp_metrics_lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock 
irq_context: 0 rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex running_helpers_waitq.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 key#23 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock genl_mutex tcp_metrics_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex tcp_metrics_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex tcp_metrics_lock krc.lock irq_context: 0 sb_writers#8 remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_data_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem kthread_create_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &x->wait irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &x->wait#21 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem 
&ei->i_es_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu quarantine_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal pcpu_lock stock_lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#8 &obj_hash[i].lock pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex pgd_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex key irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex pcpu_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex percpu_counters_lock irq_context: 0 sk_lock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX slock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX &pn->hash_lock irq_context: 0 sk_lock-AF_PPPOX clock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX rlock-AF_PPPOX irq_context: 0 slock-AF_PPPOX irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &obj_hash[i].lock pool_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex pfkey_mutex crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rlock-AF_KEY 
irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount#2 irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock &p->pi_lock irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock &p->pi_lock &rq->__lock irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 wq_pool_attach_mutex &p->pi_lock irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock/1 wq_mayday_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &fsnotify_mark_srcu &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override 
&c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_send_keepalive) &c->lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_send_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &p->alloc_lock irq_context: softirq (&pool->mayday_timer) &pool->lock/1 wq_mayday_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 
(wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 ip6_fl_lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 sk_lock-AF_INET6 cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 oom_adj_mutex &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 oom_adj_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#3 oom_adj_mutex &obj_hash[i].lock irq_context: 0 sb_writers#3 oom_adj_mutex pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 elock-AF_INET6 irq_context: 0 pernet_ops_rwsem krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock 
&tn->node_list_lock irq_context: 0 pernet_ops_rwsem ebt_mutex irq_context: 0 pernet_ops_rwsem &xt[i].mutex irq_context: 0 pernet_ops_rwsem &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem netns_bpf_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem &ht->mutex irq_context: 0 pernet_ops_rwsem &ht->mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 
0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex class irq_context: 0 pernet_ops_rwsem rtnl_mutex (&tbl->proxy_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ul->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->xdp.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mirred_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &nft_net->commit_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &pnn->routes.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex target_list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex deferred_probe_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex device_links_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &x->wait#24 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &ent->pde_unload_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex 
&ndev->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_report_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_SIT irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem dev_base_lock irq_context: 0 pernet_ops_rwsem lweventlist_lock irq_context: 0 pernet_ops_rwsem napi_hash_lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &dir->lock#2 pool_lock#2 irq_context: 0 pernet_ops_rwsem netdev_unregistering_wq.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_ETHER irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_TUNNEL6 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_IPGRE irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem &fn->fou_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex 
&n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events fqdir_free_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->sync_mutex irq_context: 0 pernet_ops_rwsem hwsim_radio_lock irq_context: 0 (wq_completion)netns net_cleanup_work rcu_state.barrier_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &ent->pde_unload_lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem rdma_nets.xa_lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-clock-AF_NETLINK irq_context: 0 pernet_ops_rwsem &nlk->wait irq_context: 0 pernet_ops_rwsem wlock-AF_NETLINK irq_context: 0 pernet_ops_rwsem &xa->xa_lock#8 irq_context: 0 pernet_ops_rwsem &fsnotify_mark_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_base_lock &xa->xa_lock#3 pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_LOOPBACK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex 
defrag6_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &base->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[0] irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[0] irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &hn->hn_lock irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock 
rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &this->info_list_lock irq_context: 0 pernet_ops_rwsem &pnettable->lock irq_context: 0 pernet_ops_rwsem &pnetids_ndev->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 k-slock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 &list->lock#17 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq 
irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rnp->exp_wq[2] irq_context: softirq (&peer->timer_persistent_keepalive) &n->list_lock irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events ((ipv6_flowlabel_exclusive).work).work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&peer->timer_persistent_keepalive) &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem batched_entropy_u8.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_callback rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: softirq slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock 
&____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 pernet_ops_rwsem k-slock-AF_INET irq_context: 0 pernet_ops_rwsem k-slock-AF_INET &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-slock-AF_INET pool_lock#2 irq_context: 0 pernet_ops_rwsem k-slock-AF_INET elock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 
(wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount#2 irq_context: softirq (&peer->timer_persistent_keepalive) &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dir->lock#2 kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_lock_key#27 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 &dentry->d_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#13 mount_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#13 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#13 &sb->s_type->i_lock_key#27 irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#13 &wb->list_lock irq_context: 0 &f->f_pos_lock &sb->s_type->i_mutex_key#18 sb_writers#13 &wb->list_lock &sb->s_type->i_lock_key#27 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem &sn->gssp_lock irq_context: 0 pernet_ops_rwsem &cd->hash_lock irq_context: 0 pernet_ops_rwsem cache_list_lock &cd->hash_lock irq_context: 0 pernet_ops_rwsem (&net->can.stattimer) irq_context: 0 pernet_ops_rwsem xfrm_state_gc_work irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_state_lock irq_context: 0 pernet_ops_rwsem &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem (work_completion)(&(&net->ipv6.addr_chk_work)->work) irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock irq_context: 0 pernet_ops_rwsem ip6_fl_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &net->rules_mod_lock irq_context: 0 pernet_ops_rwsem (&net->ipv6.ip6_fib_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex (&mrt->ipmr_expire_timer) irq_context: 0 pernet_ops_rwsem rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem __ip_vs_mutex irq_context: 0 pernet_ops_rwsem (&ipvs->dest_trash_timer) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->expire_nodest_conn_work)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->defense_work)->work) irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 pernet_ops_rwsem nfnl_subsys_ipset irq_context: 0 pernet_ops_rwsem recent_lock irq_context: 0 pernet_ops_rwsem hashlimit_mutex irq_context: 0 pernet_ops_rwsem trans_gc_work irq_context: 0 pernet_ops_rwsem 
(work_completion)(&(&cnet->ecache.dwork)->work) irq_context: 0 pernet_ops_rwsem rtnl_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock irq_context: 0 pernet_ops_rwsem sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem tcp_metrics_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-clock-AF_INET irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.policy_hash_work) irq_context: 0 pernet_ops_rwsem &net->xfrm.xfrm_policy_lock irq_context: 0 pernet_ops_rwsem (work_completion)(&net->xfrm.state_hash_work) irq_context: 0 pernet_ops_rwsem &list->lock#2 irq_context: 0 pernet_ops_rwsem &xa->xa_lock#3 irq_context: 0 pernet_ops_rwsem genl_sk_destructing_waitq.lock irq_context: 0 &dir->lock &obj_hash[i].lock irq_context: 0 &dir->lock pool_lock#2 irq_context: 0 tasklist_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem key irq_context: 0 pernet_ops_rwsem pcpu_lock stock_lock irq_context: 0 sb_writers#4 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 lock#4 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &pnsocks.lock irq_context: 0 &sb->s_type->i_mutex_key#10 resource_mutex irq_context: 0 sk_lock-AF_INET6 l2tp_ip6_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 
sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PHONET irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &p->lock stock_lock irq_context: 0 &f->f_pos_lock &p->lock cgroup_mutex irq_context: 0 &f->f_pos_lock &p->lock cgroup_mutex css_set_lock irq_context: 0 &f->f_pos_lock &p->lock cgroup_mutex css_set_lock kernfs_rename_lock irq_context: 0 &f->f_pos_lock &p->lock &obj_hash[i].lock irq_context: 0 dgram_lock irq_context: 0 sk_lock-AF_IEEE802154 irq_context: 0 sk_lock-AF_IEEE802154 slock-AF_IEEE802154 irq_context: 0 slock-AF_IEEE802154 irq_context: 0 &sec->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &rdev->wpan_phy.queue_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &list->lock#29 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM &rdev->wpan_phy.sync_txq irq_context: softirq &list->lock#29 irq_context: softirq rcu_read_lock rcu_read_lock raw_lock irq_context: 0 &sb->s_type->i_mutex_key#10 dgram_lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_IEEE802154 irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_IEEE802154 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &folio_wait_table[i] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 tcpv6_prot_mutex irq_context: 0 sk_lock-AF_INET6 device_spinlock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &c->lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &n->list_lock irq_context: 0 sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem irq_context: 0 sk_lock-AF_INET6 (kmod_concurrent_max).lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 
sk_lock-AF_INET6 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &x->wait#17 irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET6 lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock pool_lock#2 irq_context: 0 rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &asoc->wait irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_node_0 irq_context: 0 sk_lock-AF_INET6 running_helpers_waitq.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &p->alloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &list->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock irq_context: 0 lock map_idr_lock &c->lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_UNIX rcu_read_lock clock-AF_UNIX irq_context: 0 sk_lock-AF_UNIX rcu_read_lock clock-AF_UNIX pool_lock#2 irq_context: 0 sk_lock-AF_UNIX rcu_read_lock clock-AF_UNIX &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock unix_dgram_prot_lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock stock_lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock &psock->link_lock irq_context: 0 sk_lock-AF_UNIX rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_UNIX rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX slock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &psock->link_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &htab->buckets[i].lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_UNIX &psock->ingress_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&(&psock->work)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_UNIX irq_context: 0 &sb->s_type->i_mutex_key#10 &psock->ingress_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &htab->buckets[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) (work_completion)(&(&psock->work)->work) irq_context: 0 (wq_completion)events 
(work_completion)(&(&psock->rwork)->work) &list->lock#30 irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) rlock-AF_UNIX irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) &dir->lock irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 crypto_alg_sem irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem fs_reclaim irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem pool_lock#2 irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem kthread_create_lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &x->wait irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &rq->__lock irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &x->wait#21 irq_context: 0 sk_lock-AF_INET6 (&timer.timer) irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&(&sw_ctx_tx->tx_work.work)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &sw_ctx_tx->encrypt_compl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&strp->work) irq_context: 0 &sb->s_type->i_mutex_key#10 krc.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock 
&list->lock#16 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &list->lock#16 irq_context: softirq rcu_read_lock lock#6 &kcov->lock irq_context: softirq rcu_read_lock &local->ack_status_lock irq_context: softirq rcu_read_lock &local->ack_status_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &local->ack_status_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx lock#6 &kcov->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &c->lock irq_context: 0 cb_lock remove_cache_srcu &rq->__lock irq_context: 0 cb_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &pcp->lock &zone->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock irq_context: 0 
(wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 smc_v4_hashinfo.lock irq_context: 0 &smc->clcsock_release_lock irq_context: 0 &smc->clcsock_release_lock &mm->mmap_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&smc->connect_work) irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&smc->connect_work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&smc->connect_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC smc_v4_hashinfo.lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_SMC irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC clock-AF_SMC irq_context: 0 br_ioctl_mutex rtnl_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock irq_context: 0 br_ioctl_mutex rtnl_mutex.wait_lock irq_context: 0 br_ioctl_mutex &p->pi_lock irq_context: 0 br_ioctl_mutex &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 
cb_lock &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &net->xfrm.xfrm_policy_lock irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#14 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock nl_table_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rlock-AF_NETLINK irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &net->xfrm.xfrm_state_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &net->xfrm.xfrm_state_lock hrtimer_bases.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &net->xfrm.xfrm_state_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &net->xfrm.xfrm_state_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->xfrm.xfrm_policy_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &policy->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#31 irq_context: 0 cb_lock &rdev->wiphy.mtx &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &c->lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 
rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 &____s->seqcount#2 
irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock &dir->lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 (&req->rsk_timer) irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock irq_context: 0 sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 sk_lock-AF_INET6 wlock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 console_lock console_srcu console_owner_lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rhashtable_bucket irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &sem->wait_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle 
&rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 kfence_freelist_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pcpu_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rnp->exp_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &rq->__lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 slock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 wlock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &list->lock#32 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 x25_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_X25 rlock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_X25 irq_context: 0 &sb->s_type->i_mutex_key#10 &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 batched_entropy_u32.lock crngs.lock irq_context: 0 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 delayed_uprobe_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &(&net->xfrm.policy_hthresh.lock)->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &(&net->xfrm.policy_hthresh.lock)->lock &____s->seqcount#15 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&net->xfrm.policy_hthresh.work) irq_context: 0 (wq_completion)events (work_completion)(&net->xfrm.policy_hthresh.work) hash_resize_mutex irq_context: 0 (wq_completion)events (work_completion)(&net->xfrm.policy_hthresh.work) hash_resize_mutex &____s->seqcount#15 irq_context: 0 (wq_completion)events (work_completion)(&net->xfrm.policy_hthresh.work) hash_resize_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 (wq_completion)events (work_completion)(&net->xfrm.policy_hthresh.work) hash_resize_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#12 irq_context: 0 rtnl_mutex rcu_read_lock &n->lock irq_context: 0 rtnl_mutex rcu_read_lock &n->lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &c->lock irq_context: 0 cb_lock genl_mutex quarantine_lock irq_context: 0 
&root->kernfs_rwsem &p->pi_lock irq_context: 0 &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)inet_frag_wq (work_completion)(&fqdir->destroy_work) &ht->mutex quarantine_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_lock_key#16 irq_context: 0 &sb->s_type->i_lock_key#16 &dentry->d_lock irq_context: 0 sk_lock-AF_RDS irq_context: 0 sk_lock-AF_RDS slock-AF_RDS irq_context: 0 sk_lock-AF_RDS &mm->mmap_lock irq_context: 0 sk_lock-AF_RDS rds_trans_sem irq_context: 0 slock-AF_RDS irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &lock->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nl_table_lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 
cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &local->ack_status_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 mount_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &wb->list_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &wb->list_lock &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &c->lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work krc.lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &rcu_state.expedited_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rq->__lock irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &resv_map->lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &base->lock irq_context: 0 &pipe->rd_wait &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->ipv4.ra_mutex irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &audit_cmd_mutex.lock irq_context: 0 &audit_cmd_mutex.lock fs_reclaim irq_context: 0 
&audit_cmd_mutex.lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &audit_cmd_mutex.lock pool_lock#2 irq_context: 0 &audit_cmd_mutex.lock rlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 rlock-AF_BLUETOOTH irq_context: 0 ppp_mutex irq_context: 0 ppp_mutex &mm->mmap_lock irq_context: 0 ppp_mutex &mm->mmap_lock &rq->__lock irq_context: 0 ppp_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex fs_reclaim irq_context: 0 ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex stock_lock irq_context: 0 ppp_mutex &c->lock irq_context: 0 ppp_mutex pool_lock#2 irq_context: 0 ppp_mutex stack_depot_init_mutex irq_context: 0 ppp_mutex rtnl_mutex irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#8 &c->lock irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 ppp_mutex rtnl_mutex net_rwsem irq_context: 0 ppp_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 ppp_mutex rtnl_mutex &tn->lock irq_context: 0 ppp_mutex rtnl_mutex &x->wait#9 irq_context: 0 ppp_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &k->list_lock irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 ppp_mutex rtnl_mutex lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 ppp_mutex rtnl_mutex bus_type_sem irq_context: 0 ppp_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex 
remove_cache_srcu &c->lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 ppp_mutex rtnl_mutex &dev->power.lock irq_context: 0 ppp_mutex rtnl_mutex dpm_list_mtx irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 ppp_mutex rtnl_mutex subsys mutex#17 irq_context: 0 ppp_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 ppp_mutex rtnl_mutex &dir->lock#2 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex dev_base_lock irq_context: 0 ppp_mutex rtnl_mutex input_pool.lock irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 ppp_mutex rtnl_mutex &tbl->lock irq_context: 0 ppp_mutex rtnl_mutex stock_lock irq_context: 0 ppp_mutex rtnl_mutex &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex sysctl_lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rq->__lock irq_context: 0 mgmt_chan_list_lock mgmt_chan_list_lock.wait_lock irq_context: 0 mgmt_chan_list_lock &rq->__lock irq_context: 0 mgmt_chan_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock.wait_lock irq_context: 0 
sk_lock-AF_BLUETOOTH-BTPROTO_HCI &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex nl_table_lock irq_context: 0 ppp_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 ppp_mutex rtnl_mutex proc_subdir_lock irq_context: 0 ppp_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 ppp_mutex rtnl_mutex proc_subdir_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &pnettable->lock irq_context: 0 ppp_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &ppp->rlock irq_context: 0 ppp_mutex rtnl_mutex &ppp->wlock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 ppp_mutex rtnl_mutex.wait_lock irq_context: 0 ppp_mutex &p->pi_lock irq_context: 0 ppp_mutex &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem key#14 irq_context: 0 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) irq_context: 0 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key 
&nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ul->lock#2 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq irq_context: 0 rtnl_mutex &ppp->rlock irq_context: 0 rtnl_mutex &ppp->wlock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#4 irq_context: 0 rtnl_mutex &ppp->wlock &ppp->rlock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex irq_context: 0 rtnl_mutex &pn->all_ppp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex pool_lock#2 irq_context: 0 rtnl_mutex &pf->rwait irq_context: 0 rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &meta->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock irq_context: 0 &disk->open_mutex &lo->lo_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ppp->wlock irq_context: 0 &ppp->wlock &ppp->rlock irq_context: 0 &list->lock#33 irq_context: 0 bt_proto_lock rfcomm_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 &sb->s_type->i_mutex_key#10 rfcomm_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &d->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#34 irq_context: 0 &knet->mutex irq_context: 0 &mux->lock irq_context: 0 &mux->rx_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) 
&devlink->lock_key#4 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_KCM irq_context: 0 sk_lock-AF_KCM slock-AF_KCM irq_context: 0 sk_lock-AF_KCM fs_reclaim irq_context: 0 sk_lock-AF_KCM fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_KCM pool_lock#2 irq_context: 0 sk_lock-AF_KCM &____s->seqcount irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &mux->lock irq_context: 0 slock-AF_KCM irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM clock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&kcm->tx_work) irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &mux->rx_lock rlock-AF_KCM irq_context: 0 &sb->s_type->i_mutex_key#10 &knet->mutex irq_context: 0 rtnl_mutex &tn->lock &rq->__lock irq_context: 0 rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &c->lock irq_context: 0 rtnl_mutex &br->lock pool_lock#2 irq_context: 0 rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 rtnl_mutex &br->lock deferred_lock irq_context: 0 rtnl_mutex &br->lock (console_sem).lock irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &br->lock nl_table_lock irq_context: 0 rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock irq_context: 0 rtnl_mutex &tbl->lock &n->lock 
&____s->seqcount#9 irq_context: 0 rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex &tbl->lock krc.lock irq_context: 0 rtnl_mutex &dev->tx_global_lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_NETROM irq_context: 0 rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 rtnl_mutex &sch->q.lock irq_context: 0 rtnl_mutex __ip_vs_mutex irq_context: 0 rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex flowtable_lock irq_context: 0 rtnl_mutex flowtable_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx 
batched_entropy_u8.lock irq_context: 0 cb_lock &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tbl->lock krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 pgd_lock irq_context: 0 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 key irq_context: 0 &sb->s_type->i_mutex_key#8 pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#8 percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#8 pcpu_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] 
&p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock &____s->seqcount#2 irq_context: 0 bt_proto_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &n->list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &n->list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_RXRPC irq_context: 0 &sb->s_type->i_mutex_key#10 (wq_completion)krxrpcd irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &x->wait#10 irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_RXRPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET slock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &pnsocks.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET resource_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#35 irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock 
_xmit_ETHER#2 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 sk_lock-AF_LLC &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_LLC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PHONET irq_context: 0 sk_lock-AF_PHONET slock-AF_PHONET irq_context: 0 slock-AF_PHONET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle lock#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tn->idrinfo->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex zones_mutex irq_context: 0 rtnl_mutex zones_mutex fs_reclaim irq_context: 0 rtnl_mutex zones_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex zones_mutex pool_lock#2 irq_context: 0 rtnl_mutex zones_mutex rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex zones_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex zones_mutex batched_entropy_u32.lock irq_context: 0 rtnl_mutex zones_mutex &base->lock irq_context: 0 rtnl_mutex zones_mutex &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex zones_mutex &rq->__lock irq_context: 0 rtnl_mutex zones_mutex &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 rtnl_mutex zones_mutex flowtable_lock irq_context: 0 rtnl_mutex &p->tcfa_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex flow_indr_block_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &nsim_trap_data->trap_lock crngs.lock base_crng.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 irq_context: softirq (&icsk->icsk_retransmit_timer) &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) &dir->lock irq_context: softirq (&icsk->icsk_retransmit_timer) stock_lock irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 llc_sap_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex stock_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tn->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &x->wait#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &k->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex bus_type_sem irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 &c->lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex input_pool.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex failover_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnettable->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 &____s->seqcount#2 irq_context: 0 sb_writers#4 sb_writers#4 &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx batched_entropy_u8.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &base->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock 
irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 &tfile->napi_mutex irq_context: 0 &tfile->napi_mutex &rq->__lock irq_context: 0 &tfile->napi_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tfile->napi_mutex &____s->seqcount irq_context: 0 &tfile->napi_mutex pool_lock#2 irq_context: 0 &tfile->napi_mutex &mm->mmap_lock irq_context: 0 &tfile->napi_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock fs_reclaim rcu_node_0 irq_context: 0 cb_lock fs_reclaim &rq->__lock irq_context: 0 cb_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex flowtable_lock &ht->lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &ht->lock irq_context: 0 rtnl_mutex flowtable_lock &(&flowtable->gc_work)->timer irq_context: 0 rtnl_mutex flowtable_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock &base->lock irq_context: 0 rtnl_mutex flowtable_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex flowtable_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &ht->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) rcu_read_lock &ht->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &base->lock irq_context: 0 
(wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_add irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_del irq_context: 0 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock irq_context: 0 rtnl_mutex &net->ipv6.addrconf_hash_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex 
rcu_state.exp_mutex.wait_lock irq_context: 0 rtnl_mutex &rnp->exp_lock irq_context: 0 rtnl_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock init_mm.page_table_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ul->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex &ul->lock#2 &dir->lock#2 irq_context: 0 rtnl_mutex rcu_state.exp_mutex irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &ei->i_data_sem &____s->seqcount irq_context: 0 &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &base->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 callchain_mutex irq_context: 0 callchain_mutex fs_reclaim irq_context: 0 callchain_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 callchain_mutex pool_lock#2 irq_context: 0 callchain_mutex &c->lock irq_context: 0 callchain_mutex &n->list_lock irq_context: 0 callchain_mutex &n->list_lock &c->lock irq_context: 0 callchain_mutex &rq->__lock irq_context: 0 callchain_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) callchain_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) callchain_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) callchain_mutex pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &rq->__lock irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &____s->seqcount#7 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &nf_conntrack_locks[i]/1 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex &c->lock irq_context: 0 nfnl_subsys_ctnetlink nf_conntrack_mutex rcu_read_lock &nf_nat_locks[i] irq_context: 0 sb_writers#4 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 
rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock wlock-AF_UNSPEC irq_context: softirq wlock-AF_UNSPEC irq_context: 0 &p->lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 
0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 nfnl_subsys_ipset &n->list_lock irq_context: 0 nfnl_subsys_ipset &n->list_lock &c->lock irq_context: 0 nfnl_subsys_ipset &rq->__lock irq_context: 0 nfnl_subsys_ipset &base->lock irq_context: 0 nfnl_subsys_ipset &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 sb_writers#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex vlan_ioctl_mutex.wait_lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &x->wait#24 irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &rq->__lock irq_context: 0 nfnl_subsys_ipset rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex lweventlist_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex.wait_lock irq_context: 0 nfnl_subsys_ipset ip_set_ref_lock irq_context: 0 nfnl_subsys_ipset (work_completion)(&(&gc->dwork)->work) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 
0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 rtnl_mutex &ul->lock#2 &c->lock irq_context: 0 rtnl_mutex &tbl->lock &c->lock irq_context: 0 rtnl_mutex &tbl->lock &n->list_lock irq_context: 0 rtnl_mutex &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim irq_context: 0 (wq_completion)phy11 
(work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx pool_lock#2 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &c->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx nl_table_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &base->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &meta_group_info[i]->alloc_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg0 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 remove_cache_srcu irq_context: 0 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &c->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &____s->seqcount#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_writers#4 kfence_freelist_lock irq_context: 0 sb_writers#4 sb_writers#4 &meta->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_node_0 irq_context: softirq &(&local->roc_work)->timer irq_context: softirq &(&local->roc_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&local->roc_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&local->roc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&local->roc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock 
&rq->__lock irq_context: softirq &(&local->roc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex irq_context: 0 cb_lock &rdev->wiphy.mtx &lock->wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex flowtable_lock &x->wait#10 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 
(wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 &p->lock &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx quarantine_lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle bit_wait_table + i irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 sb_writers#4 rcu_node_0 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 
sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &ei->i_es_lock key#2 irq_context: softirq &fq->mq_flush_lock bit_wait_table + i &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq &(&hctx->run_work)->timer irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx quarantine_lock irq_context: softirq rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) irq_context: 0 
(wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &c->lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_wait.lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &lock->wait_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &p->pi_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pool_lock#2 irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock percpu_counters_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &q->queue_lock &blkcg->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) irq_context: 0 (wq_completion)phy12 
(work_completion)(&(&local->roc_work)->work) &local->mtx irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &c->lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_wait.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 bit_wait_table + i irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &hctx->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 pool_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 key#12 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 key#13 irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &obj_hash[i].lock irq_context: softirq &(&flowtable->gc_work)->timer irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &journal->j_list_lock key#15 irq_context: 0 &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 sb_writers#4 bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle 
&ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &c->lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &____s->seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount irq_context: 0 &mm->mmap_lock sb_pagefaults &n->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &n->list_lock &c->lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &pcp->lock &zone->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &xa->xa_lock#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle pool_lock#2 irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 batched_entropy_u8.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &base->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)afs (work_completion)(&net->cells_manager) &rq->__lock irq_context: 0 (wq_completion)afs (work_completion)(&net->fs_manager) &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &lock->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock bit_wait_table + i irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &tags->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &base->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem 
jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy5 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) remove_cache_srcu irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) remove_cache_srcu quarantine_lock irq_context: 0 &xt[i].mutex rcu_node_0 irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xt[i].mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ret->b_state_lock &journal->j_list_lock &base->lock irq_context: 0 &ret->b_state_lock &journal->j_list_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 nfnl_subsys_ctnetlink_exp nf_conntrack_expect_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 &pcp->lock &zone->lock irq_context: softirq rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 
&sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &____s->seqcount irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 proto_tab_lock irq_context: 0 proto_tab_lock pool_lock#2 irq_context: 0 proto_tab_lock &dir->lock irq_context: 0 proto_tab_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#36 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock &m->lock irq_context: 0 rcu_read_lock &m->lock &xs->map_list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &m->lock &xs->map_list_lock stock_lock irq_context: 0 rcu_read_lock &m->lock &xs->map_list_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &m->lock &xs->map_list_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &m->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &m->lock &xs->map_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &m->lock &xs->map_list_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &m->lock &xs->map_list_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &m->lock &xs->map_list_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 map_idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 map_idr_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 map_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &mm->mmap_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &iint->mutex mapping.invalidate_lock stock_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 stock_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &meta->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) kfence_freelist_lock irq_context: 0 jbd2_handle irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &journal->j_barrier irq_context: 0 &journal->j_barrier &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &rq->__lock irq_context: 0 &journal->j_barrier &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex 
&fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex (&q->adapt_timer) irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &dd->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &dd->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_lock_key#23 bit_wait_table + i irq_context: 0 &journal->j_barrier 
&journal->j_checkpoint_mutex &journal->j_list_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 &iint->mutex rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex (&timer.timer) irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &c->lock irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock irq_context: 0 sb_writers#8 iattr_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &tbl->lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 cb_lock genl_mutex nbd_index_mutex irq_context: 0 cb_lock genl_mutex &nbd->config_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock 
mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &nbd->config_lock &c->lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &nbd->config_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &nbd->config_lock (console_sem).lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex &nbd->config_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex &nbd->config_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &zone->lock &____s->seqcount irq_context: 0 cb_lock genl_mutex &nbd->config_lock &bdev->bd_size_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &q->queue_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex &nbd->config_lock set->srcu irq_context: 0 cb_lock genl_mutex &nbd->config_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) &ACCESS_PRIVATE(sdp, lock) irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&forw_packet_aggr->delayed_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &x->wait#3 irq_context: 0 cb_lock genl_mutex &nbd->config_lock set->srcu irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock &wq irq_context: softirq rcu_read_lock 
rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem &mapping->private_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#8 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock key#22 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &journal->j_checkpoint_mutex batched_entropy_u8.lock irq_context: 0 &journal->j_checkpoint_mutex kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock (console_sem).lock irq_context: 0 cb_lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock 
console_lock console_srcu console_owner irq_context: 0 cb_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock console_lock console_srcu console_owner console_owner_lock irq_context: softirq (&n->timer) &n->lock pool_lock#2 irq_context: softirq (&n->timer) &c->lock irq_context: softirq (&n->timer) &____s->seqcount#2 irq_context: softirq (&n->timer) &____s->seqcount irq_context: softirq (&n->timer) pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&n->timer) &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
&rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx 
&local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#8 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#8 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &____s->seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sk_lock-AF_INET6 
fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &meta->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier jbd2_handle irq_context: 0 &journal->j_barrier &journal->j_wait_commit irq_context: 0 &journal->j_barrier &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &____s->seqcount irq_context: 0 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 kn->active#59 fs_reclaim irq_context: 0 kn->active#59 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#59 stock_lock irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#59 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#59 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#11 irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpuset_hotplug_work irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex cpu_hotplug_lock cpuset_mutex &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 (wq_completion)cpuset_migrate_mm irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#59 &wq->mutex &x->wait#10 irq_context: 0 &f->f_pos_lock sb_writers#11 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &list->lock#18 irq_context: 0 sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &kernfs_locks->open_file_mutex[count] krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock 
mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &list->lock#18 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock 
&p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &pn->l2tp_tunnel_idr_lock irq_context: 0 &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET cpu_hotplug_lock irq_context: 0 sk_lock-AF_PPPOX fs_reclaim irq_context: 0 sk_lock-AF_PPPOX fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PPPOX pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &tunnel->hlist_lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &tunnel->hlist_lock &pn->l2tp_session_hlist_lock irq_context: 0 &pn->l2tp_tunnel_idr_lock &c->lock irq_context: 0 sk_lock-AF_PPPOX &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX slock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX clock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &tunnel->hlist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->l2tp_session_hlist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &list->lock#37 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &ps->sk_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)l2tp irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &tunnel->hlist_lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 cpu_hotplug_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: softirq rcu_callback rlock-AF_PPPOX irq_context: softirq rcu_callback wlock-AF_PPPOX irq_context: softirq rcu_callback clock-AF_INET irq_context: softirq rcu_callback krc.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &msk->pm.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 fanout_mutex &c->lock irq_context: 0 fanout_mutex &n->list_lock irq_context: 0 fanout_mutex &n->list_lock &c->lock irq_context: softirq (&n->timer) &n->lock &c->lock irq_context: softirq (&n->timer) &n->lock &n->list_lock irq_context: softirq (&n->timer) &n->lock &n->list_lock &c->lock irq_context: 0 &pn->l2tp_tunnel_idr_lock &n->list_lock irq_context: 0 &pn->l2tp_tunnel_idr_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock 
&____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 text_mutex text_mutex.wait_lock irq_context: 0 text_mutex &rq->__lock irq_context: 0 text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex sb_writers#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 l2tp_ip_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: softirq rcu_read_lock hwsim_radio_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock hwsim_radio_lock kfence_freelist_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex pcpu_alloc_mutex irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex free_vmap_area_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex vmap_area_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex init_mm.page_table_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex &pcp->lock &zone->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex rcu_read_lock rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex 
&rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_reserved irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex fs_reclaim &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex ipcomp_resource_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock nl_table_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nfnl_subsys_ipset &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 l2tp_ip_lock irq_context: softirq (&n->timer) icmp_global.lock irq_context: softirq (&n->timer) icmp_global.lock batched_entropy_u8.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &ul->lock irq_context: softirq (&n->timer) k-slock-AF_INET pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET batched_entropy_u32.lock irq_context: softirq (&n->timer) k-slock-AF_INET &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&n->timer) rcu_read_lock id_table_lock irq_context: softirq (&n->timer) &n->lock irq_context: softirq (&n->timer) nl_table_lock irq_context: softirq (&n->timer) nl_table_wait.lock irq_context: 0 &iint->mutex mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 &iint->mutex mapping.invalidate_lock kfence_freelist_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start 
&rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) quarantine_lock irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &xa->xa_lock#4 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 stock_lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &xa->xa_lock#4 irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#10 &type->i_mutex_dir_key#6 &root->kernfs_rwsem stock_lock irq_context: 0 kn->active#60 fs_reclaim irq_context: 0 kn->active#60 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#60 stock_lock irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] &____s->seqcount#2 irq_context: 0 kn->active#60 &kernfs_locks->open_file_mutex[count] &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#10 irq_context: 0 &f->f_pos_lock sb_writers#10 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#10 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#10 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#10 &of->mutex irq_context: 0 &f->f_pos_lock sb_writers#10 &obj_hash[i].lock irq_context: 0 misc_mtx &n->list_lock irq_context: 0 misc_mtx &n->list_lock &c->lock irq_context: 0 tracepoints_mutex &n->list_lock irq_context: 0 tracepoints_mutex &n->list_lock &c->lock irq_context: 0 &iint->mutex &cfs_rq->removed.lock irq_context: softirq &x->lock irq_context: softirq (&x->rtimer) irq_context: softirq (&x->rtimer) &x->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &loc_l->lock irq_context: 0 rcu_read_lock &loc_l->lock &l->lock irq_context: softirq rcu_read_lock &local->rx_path_lock &rdev->mgmt_registrations_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock &base->lock irq_context: 0 rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&q->timeout_work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock &local->rx_path_lock &c->lock irq_context: softirq rcu_read_lock &local->rx_path_lock &n->list_lock irq_context: softirq rcu_read_lock &local->rx_path_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &local->rx_path_lock &local->queue_stop_reason_lock irq_context: softirq rcu_read_lock &local->rx_path_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock &local->rx_path_lock hwsim_radio_lock irq_context: softirq rcu_read_lock &local->rx_path_lock hwsim_radio_lock pool_lock#2 irq_context: softirq rcu_read_lock &local->rx_path_lock hwsim_radio_lock &c->lock irq_context: softirq rcu_read_lock &local->rx_path_lock hwsim_radio_lock &list->lock#16 irq_context: softirq rcu_read_lock &local->rx_path_lock &list->lock#16 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock pool_lock#2 irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock krc.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 clock-AF_INET irq_context: 0 cgroup_mutex krc.lock irq_context: 0 cgroup_mutex &rq->__lock irq_context: 0 cgroup_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 sb_writers#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 prog_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock icmp_global.lock irq_context: 0 rcu_read_lock rcu_read_lock icmp_global.lock batched_entropy_u8.lock irq_context: 0 &xt[i].mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock &q->lock#2 &base->lock irq_context: 0 rcu_read_lock &q->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock 
pool_lock#2 irq_context: 0 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE pool_lock#2 irq_context: 0 rtnl_mutex &tb->tb6_lock stock_lock irq_context: 0 &mm->mmap_lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex &tb->tb6_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &tb->tb6_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &ndev->lock &ifa->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock _xmit_IPGRE irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &sch->q.lock batched_entropy_u32.lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 namespace_sem stock_lock irq_context: 0 namespace_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 quarantine_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 namespace_sem &____s->seqcount#2 irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#4 irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#4 pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 &obj_hash[i].lock irq_context: 0 
&type->s_umount_key#23/1 stock_lock irq_context: softirq &(&ifa->dad_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pid_caches_mutex slab_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pid_caches_mutex slab_mutex &rq->__lock irq_context: 0 pid_caches_mutex slab_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#4 &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim irq_context: 0 sb_writers &type->i_mutex_dir_key#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#4 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 stock_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &dentry->d_lock &wq#2 irq_context: 
0 sb_writers &type->i_mutex_dir_key#2 &sbinfo->stat_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &xa->xa_lock#4 pool_lock#2 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &s->s_inode_list_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 tk_core.seq.seqcount irq_context: 0 sb_writers &type->i_mutex_dir_key#2 batched_entropy_u32.lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &sb->s_type->i_mutex_key#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &____s->seqcount irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#8 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#4 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 key#9 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &xa->xa_lock#8 &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex stack_depot_init_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem stack_depot_init_mutex.wait_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sem->wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq 
&p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&q->timer) irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &wb->list_lock &sb->s_type->i_lock_key#5 irq_context: 0 sb_writers &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq (&q->timer) &obj_hash[i].lock irq_context: softirq (&q->timer) pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rcu_read_lock_bh &sch->q.lock irq_context: 0 rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 &mq_lock irq_context: 0 (wq_completion)events free_ipc_work irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work 
&obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work mount_lock irq_context: 0 (wq_completion)events free_ipc_work mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 shrinker_rwsem irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rename_lock.seqcount irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &sb->s_type->i_lock_key#20 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &s->s_inode_list_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &xa->xa_lock#8 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 sb_lock irq_context: 0 (wq_completion)events free_ipc_work unnamed_dev_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work list_lrus_mutex irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#4 irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#4 pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sb_lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sb_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work mnt_id_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work &ids->rwsem irq_context: 0 (wq_completion)events free_ipc_work (work_completion)(&ht->run_work) irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &ht->mutex pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work percpu_counters_lock irq_context: 0 (wq_completion)events free_ipc_work pcpu_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock krc.lock irq_context: 0 (wq_completion)events free_ipc_work proc_inum_ida.xa_lock irq_context: 0 (wq_completion)events free_ipc_work stock_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 
fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem &rq->__lock irq_context: 0 rtnl_mutex (inet6addr_validator_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu pool_lock#2 irq_context: 0 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &pcp->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 namespace_sem &n->list_lock irq_context: 0 namespace_sem &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu quarantine_lock irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu &c->lock irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu &n->list_lock 
irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#7 &sb->s_type->i_lock_key#31 &dentry->d_lock &wq#2 irq_context: 0 kn->active#61 &rq->__lock irq_context: 0 kn->active#61 fs_reclaim irq_context: 0 kn->active#61 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#61 stock_lock irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#61 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &p->lock stock_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock rhashtable_bucket irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx nl_table_wait.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#8 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx 
&local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &xa->xa_lock#8 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &c->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock 
irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#15 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->chanctx_mtx irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 rtnl_mutex &lock->wait_lock irq_context: 0 rtnl_mutex &list->lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&(&rdev->dfs_update_channels_wk)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &ifibss->incomplete_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx 
irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rnp->exp_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[0] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &list->lock#15 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx (&ifibss->timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &base->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &pool->lock/1 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->queue_stop_reason_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->sta_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx _xmit_ETHER &local->filter_lock krc.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (&local->dynamic_ps_timer) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&local->dynamic_ps_enable_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&sdata->recalc_smps) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->csa_finalize_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&link->color_change_finalize_work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx (work_completion)(&(&link->dfs_cac_timer_work)->work) irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx 
rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &list->lock#15 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->filter_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem irq_context: 0 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 &ei->i_data_sem pool_lock#2 irq_context: 0 &ei->i_data_sem &ei->i_es_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &fq->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock 
irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_checkpoint_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_commit irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &journal->j_wait_done_commit irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#8 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#8 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-clock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &xa->xa_lock#8 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &fsnotify_mark_srcu irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-clock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (wq_completion)krxrpcd irq_context: 0 pernet_ops_rwsem &wq->mutex irq_context: 0 pernet_ops_rwsem &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &wq->mutex &x->wait#10 irq_context: 0 pernet_ops_rwsem rlock-AF_RXRPC irq_context: 0 pernet_ops_rwsem (&net->fs_probe_timer) irq_context: 0 pernet_ops_rwsem &net->cells_lock irq_context: 0 pernet_ops_rwsem (&net->cells_timer) irq_context: 0 pernet_ops_rwsem bit_wait_table + i irq_context: 0 pernet_ops_rwsem (&net->fs_timer) irq_context: 0 pernet_ops_rwsem ovs_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex 
(work_completion)(&data->gc_work) irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_connlabels_lock irq_context: 0 pernet_ops_rwsem ovs_mutex net_rwsem irq_context: 0 pernet_ops_rwsem (work_completion)(&(&ovs_net->masks_rebalance)->work) irq_context: 0 pernet_ops_rwsem (work_completion)(&ovs_net->dp_notify_work) irq_context: 0 pernet_ops_rwsem &srv->idr_lock irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem (&rxnet->peer_keepalive_timer) irq_context: 0 pernet_ops_rwsem (work_completion)(&rxnet->peer_keepalive_work) irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 
irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 pernet_ops_rwsem (work_completion)(&tn->work) irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem &tn->nametbl_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem (work_completion)(&(&c->work)->work) irq_context: 0 pernet_ops_rwsem (&rxnet->service_conn_reap_timer) irq_context: 0 pernet_ops_rwsem (wq_completion)krdsd irq_context: 0 pernet_ops_rwsem (work_completion)(&rtn->rds_tcp_accept_w) irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 &queue->rskq_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 pernet_ops_rwsem rds_tcp_conn_lock irq_context: 0 pernet_ops_rwsem loop_conns_lock irq_context: 0 pernet_ops_rwsem (wq_completion)l2tp irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 pernet_ops_rwsem rtnl_mutex &ul->lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex _xmit_NONE irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &obj_hash[i].lock pool_lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&q->timer) rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &meta->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: softirq (&peer->timer_persistent_keepalive) batched_entropy_u8.lock irq_context: softirq (&peer->timer_persistent_keepalive) kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock rcu_read_lock 
per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &xa->xa_lock#8 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle lock#5 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback 
(work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 &bdi->wb_waitq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_raw_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount 
irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock rcu_node_0 irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock 
&memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount#2 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &____s->seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock &n->list_lock irq_context: 0 rtnl_mutex &br->hash_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock_bh &c->lock irq_context: 0 rcu_read_lock_bh &n->list_lock irq_context: 0 rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem key#3 
irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (work_completion)(&data->gc_work) irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &f->f_owner.lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &pipe->mutex/1 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 lock#5 &lruvec->lru_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 &lruvec->lru_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&rxnet->peer_keepalive_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#4 &n->list_lock irq_context: 0 &type->s_umount_key#23/1 &xa->xa_lock#4 &n->list_lock &c->lock irq_context: 0 &type->s_umount_key#23/1 &rq->__lock irq_context: 0 &type->s_umount_key#23/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle 
&ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &cfs_rq->removed.lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &ul->lock#2 batched_entropy_u8.lock irq_context: 0 rtnl_mutex &ul->lock#2 kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &fsnotify_mark_srcu &n->list_lock irq_context: 0 &fsnotify_mark_srcu &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM irq_context: 0 sk_lock-AF_NETROM 
slock-AF_NETROM irq_context: 0 sk_lock-AF_NETROM ax25_uid_lock irq_context: 0 sk_lock-AF_NETROM &rq->__lock irq_context: 0 sk_lock-AF_NETROM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM nr_list_lock irq_context: 0 slock-AF_NETROM irq_context: 0 nr_list_lock irq_context: 0 sk_lock-AF_NETROM pool_lock#2 irq_context: 0 sk_lock-AF_NETROM &list->lock#38 irq_context: 0 sk_lock-AF_NETROM &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &base->lock irq_context: 0 sk_lock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_NETROM &ei->socket.wq.wait irq_context: 0 sk_lock-AF_NETROM fs_reclaim irq_context: 0 sk_lock-AF_NETROM fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NETROM &c->lock irq_context: 0 sk_lock-AF_NETROM &mm->mmap_lock irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM slock-AF_NETROM &sk->sk_lock.wq irq_context: 0 slock-AF_NETROM &sk->sk_lock.wq irq_context: 0 slock-AF_NETROM &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_NETROM &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_NETROM &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/netrom/nr_loopback.c:18 irq_context: softirq net/netrom/nr_loopback.c:18 &list->lock#38 irq_context: softirq net/netrom/nr_loopback.c:18 nr_list_lock irq_context: softirq net/netrom/nr_loopback.c:18 pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 &pcp->lock &zone->lock irq_context: softirq net/netrom/nr_loopback.c:18 &____s->seqcount irq_context: softirq net/netrom/nr_loopback.c:18 rcu_read_lock pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 &base->lock irq_context: softirq net/netrom/nr_loopback.c:18 &base->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &base->lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM wlock-AF_NETROM irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &list->lock#20 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM 
rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_read_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)events free_ipc_work &type->s_umount_key#47 &obj_hash[i].lock pool_lock irq_context: 0 &u->iolock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_NETROM &n->list_lock irq_context: 0 sk_lock-AF_NETROM &n->list_lock &c->lock irq_context: 0 sk_lock-AF_NETROM fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/netrom/nr_loopback.c:18 &c->lock irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 irq_context: 0 sk_lock-AF_ALG sk_lock-AF_ALG/1 slock-AF_ALG irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &meta->lock irq_context: 0 tracepoints_mutex rcu_read_lock rcu_node_0 irq_context: 0 tracepoints_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 
irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &zone->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 sctp_assocs_id_lock &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock &htab->buckets[i].lock irq_context: 0 rcu_read_lock &htab->buckets[i].lock &psock->link_lock irq_context: 0 rcu_read_lock &htab->buckets[i].lock &psock->link_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &htab->buckets[i].lock &psock->link_lock pool_lock#2 irq_context: 0 rcu_read_lock &htab->buckets[i].lock clock-AF_UNIX irq_context: 0 rcu_read_lock &htab->buckets[i].lock &psock->ingress_lock irq_context: 0 rcu_read_lock &htab->buckets[i].lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &htab->buckets[i].lock pool_lock#2 irq_context: 0 rcu_read_lock &htab->buckets[i].lock krc.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &idev->mc_report_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_report_work)->work) &idev->mc_report_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_report_work)->work) &idev->mc_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_report_work)->work) &idev->mc_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_report_work)->work) &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_report_work)->work) &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_report_work)->work) &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: softirq (&ndev->rs_timer) init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) batched_entropy_u8.lock irq_context: softirq (&ndev->rs_timer) kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 kfence_freelist_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem 
&mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex inet_diag_table_mutex irq_context: 0 sock_diag_mutex fs_reclaim irq_context: 0 sock_diag_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sock_diag_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex rlock-AF_NETLINK irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &u->iolock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 tracepoints_mutex remove_cache_srcu irq_context: 0 tracepoints_mutex remove_cache_srcu quarantine_lock irq_context: 0 tracepoints_mutex remove_cache_srcu &c->lock irq_context: 0 tracepoints_mutex remove_cache_srcu &rq->__lock irq_context: 0 tracepoints_mutex remove_cache_srcu &n->list_lock irq_context: 0 tracepoints_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 tracepoints_mutex 
remove_cache_srcu &obj_hash[i].lock irq_context: 0 tracepoints_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &pdata->netdev_lock &dir->lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem ndev_hash_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem crypto_alg_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem devices.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &table->rwlock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &table->lock#4 &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &ndev->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &ndev->lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex net_rwsem &ndev->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &device->cache_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rdmacg_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex batched_entropy_u8.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex kfence_freelist_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem gdp_mutex kobj_ns_type_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#84 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem subsys mutex#84 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &zone->lock irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 &u->iolock irq_context: 0 &pipe->mutex/1 &u->iolock &rq->__lock irq_context: 0 &pipe->mutex/1 &u->iolock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &u->iolock rlock-AF_UNIX irq_context: 0 &pipe->mutex/1 &ei->socket.wq.wait irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &obj_hash[i].lock pool_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock &base->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)infiniband irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 fs_reclaim irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 pool_lock#2 irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &rxe->usdev_lock rtnl_mutex irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->cache_lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &obj_hash[i].lock irq_context: 0 (wq_completion)infiniband (work_completion)(&work->work)#2 &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
devices_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#18 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem crngs.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem free_vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem vmap_area_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem init_mm.page_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &cq->cq_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem stock_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem 
clients_rwsem &device->client_data_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &sb->s_type->i_lock_key#8 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &dir->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &qp->state_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#18 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#18 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#18 batched_entropy_u8.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#18 kfence_freelist_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem cpu_hotplug_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem kthread_create_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem 
&device->client_data_rwsem &x->wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem wq_pool_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem wq_pool_mutex &wq->mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_port_list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &mad_queue->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &qp->rq.producer_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_clients.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_mad_clients.xa_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &port_priv->reg_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_agent_port_list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#16 &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem 
&device->client_data_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &port_priv->reg_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &cm.device_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 crngs.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#18 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &id_priv->qp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &id_priv->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#19 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#19 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &cm_id_priv->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &cm_id_priv->lock &cm.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem lock#7 &xa->xa_lock#18 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem umad_ida.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem chrdevs_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &c->lock irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem req_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &x->wait#11 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem 
uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#85 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#85 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pcpu_alloc_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem pcpu_alloc_mutex pcpu_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uverbs_ida.xa_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#86 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#86 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#87 irq_context: 0 sb_writers &type->i_mutex_dir_key/1 &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#87 &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem 
clients_rwsem &device->client_data_rwsem subsys mutex#87 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem subsys mutex#87 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem console_lock console_srcu console_owner console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rds_ib_devices_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rds_ib_devices_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rds_ib_devices_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem ib_nodev_conns_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem smc_ib_devices.mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &device->event_handler_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &device->event_handler_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &device->event_handler_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &u->iolock stock_lock irq_context: 0 &pipe->mutex/1 &u->iolock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &u->iolock pool_lock#2 irq_context: 0 &pipe->mutex/1 &pipe->mutex#2/2 irq_context: 0 &pipe->mutex#2/2 irq_context: 0 &pipe->mutex#2/2 &lock->wait_lock irq_context: 0 &pipe->mutex#2/2 &p->pi_lock irq_context: 0 &pipe->mutex#2/2 &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex#2/2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex#2/2 &rq->__lock irq_context: 0 &pipe->mutex#2/2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &pnettable->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem 
devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &rxe->usdev_lock rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) &table->rwlock irq_context: 0 (wq_completion)events (work_completion)(&smcibdev->port_event_work) smc_lgr_list.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#16 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#16 pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &x->wait#9 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &k->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex 
&root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex bus_type_sem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex sysfs_symlink_target_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &dev->power.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex dpm_list_mtx irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#84 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#84 &k->k_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 
0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex batched_entropy_u8.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex kfence_freelist_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &c->lock irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM rfcomm_sk_list.lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_RFCOMM irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key#24 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem nf_hook_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex nl_table_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex nl_table_wait.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &pipe->mutex/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) crngs.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &pdata->netdev_lock irq_context: 0 rtnl_mutex (&brmctx->ip4_mc_router_timer) irq_context: 0 rtnl_mutex (&brmctx->ip4_other_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip4_own_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_mc_router_timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_other_query.timer) irq_context: 0 rtnl_mutex (&brmctx->ip6_own_query.timer) irq_context: 0 rtnl_mutex (work_completion)(&(&br->gc_work)->work) irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &br->multicast_lock irq_context: 0 rtnl_mutex (work_completion)(&br->mcast_gc_work) irq_context: 0 rtnl_mutex (work_completion)(&br->mcast_gc_work) &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&br->mcast_gc_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (work_completion)(&ht->run_work) irq_context: 0 rtnl_mutex &ht->mutex irq_context: 0 rtnl_mutex &ht->mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &ht->mutex pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx 
&root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock link_idr_lock &n->list_lock irq_context: 0 lock link_idr_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_internal batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_internal kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal &meta->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &cfs_rq->removed.lock irq_context: softirq &x->lock &net->xfrm.xfrm_state_lock irq_context: softirq &x->lock xfrm_state_gc_lock irq_context: softirq &x->lock rcu_read_lock &pool->lock irq_context: softirq &x->lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &x->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq &x->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &x->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &x->lock rcu_read_lock pool_lock#2 irq_context: softirq &x->lock rcu_read_lock nl_table_lock irq_context: softirq &x->lock rcu_read_lock &obj_hash[i].lock irq_context: softirq &x->lock rcu_read_lock nl_table_wait.lock irq_context: 0 sk_lock-AF_INET stock_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock clock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &stab->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &stab->lock &psock->link_lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 crypto_alg_sem (crypto_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_alg_sem (crypto_chain).rwsem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem irq_context: 0 (wq_completion)events 
(work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem crypto_alg_sem.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex crypto_alg_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_alg_sem.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) crc_t10dif_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&crct10dif_rehash_work) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[3] irq_context: 0 rtnl_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults tk_core.seq.seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults pool_lock#2 
irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &ei->i_raw_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#8 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &folio_wait_table[i] irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 sk_lock-AF_ALG quarantine_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu irq_context: 0 sk_lock-AF_ALG remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG batched_entropy_u8.lock irq_context: 0 sk_lock-AF_ALG kfence_freelist_lock irq_context: 0 sk_lock-AF_ALG &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &stab->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &stab->lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &psock->ingress_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) pool_lock irq_context: 0 sb_writers#3 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET (&tw->tw_timer) irq_context: 0 sk_lock-AF_INET rcu_read_lock fastopen_seqlock.seqcount irq_context: 0 &pipe->mutex/1 &pipe->mutex#2/2 &lock->wait_lock irq_context: 0 &pipe->mutex/1 &pipe->mutex#2/2 &rq->__lock irq_context: 0 &pipe->mutex/1 &pipe->mutex#2/2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &p->pi_lock irq_context: 0 &pipe->mutex/1 &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_node_0 irq_context: 0 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_ALG fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle kfence_freelist_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#13 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock irq_context: 0 &iint->mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &ret->b_state_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 
0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex remove_cache_srcu pool_lock#2 irq_context: 0 tracepoints_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 tracepoints_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 tracepoints_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_INET &dir->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &dir->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-clock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_INET 
k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET crngs.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &token_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 
pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET slock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock fastopen_seqlock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock fastopen_seqlock fastopen_seqlock.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &____s->seqcount irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET slock-AF_INET irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET slock-AF_INET &sk->sk_lock.wq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) slock-AF_INET irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u32.lock irq_context: 0 
(wq_completion)events (work_completion)(&ht->run_work) &ht->mutex remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rhashtable_bucket rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &rq->__lock irq_context: 0 sk_lock-AF_INET6 (console_sem).lock irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_ALG rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock (&tw->tw_timer) irq_context: softirq rcu_read_lock rcu_read_lock &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#4 (console_sem).lock irq_context: 0 sb_writers#4 console_lock console_srcu console_owner_lock irq_context: 0 sb_writers#4 console_lock console_srcu console_owner irq_context: 0 sb_writers#4 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sb_writers#4 console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &lru->node[i].lock irq_context: 0 sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 &journal->j_wait_reserved irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_updates irq_context: 0 sb_writers#4 &journal->j_barrier irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_list_lock 
irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &q->requeue_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &x->wait#26 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &dd->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex bit_wait_table + i irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_state_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock irq_context: 0 sb_writers#4 &journal->j_barrier 
&journal->j_checkpoint_mutex rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock pool_lock#2 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex (&timer.timer) irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &c->lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq 
rcu_read_lock rcu_read_lock slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &n->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &n->list_lock &c->lock irq_context: 0 sb_writers#4 console_owner_lock irq_context: 0 sb_writers#4 console_owner irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_reserved &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &journal->j_barrier jbd2_handle irq_context: 0 sb_writers#4 &journal->j_barrier jbd2_handle &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_wait_commit irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_wait_done_commit irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &sb->s_type->i_lock_key#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET elock-AF_INET irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &token_hash[i].lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &journal->j_wait_transaction_locked irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fastopen_seqlock.seqcount irq_context: 0 sb_writers#4 &journal->j_wait_transaction_locked &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock fastopen_seqlock.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 krc.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock irq_context: 0 sk_lock-AF_INET6 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override &c->lock 
irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex &pn->l2tp_tunnel_idr_lock irq_context: 0 cb_lock genl_mutex &pn->l2tp_tunnel_idr_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex stock_lock irq_context: 0 cb_lock genl_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 cb_lock genl_mutex &dir->lock irq_context: 0 cb_lock genl_mutex l2tp_ip_lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 cb_lock genl_mutex k-slock-AF_INET irq_context: 0 cb_lock genl_mutex k-clock-AF_INET irq_context: 0 cb_lock genl_mutex &xa->xa_lock#8 irq_context: 0 cb_lock genl_mutex &fsnotify_mark_srcu irq_context: 0 cb_lock genl_mutex &pn->l2tp_tunnel_idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &data->mutex irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu pool_lock#2 
irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &c->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock &c->lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock 
&rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &c->lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &c->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
sk_lock-AF_ALG &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET/1 &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &hwstats->hwsdev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh &base->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET rcu_read_lock &pcp->lock &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock 
rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key#22 irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &ei->i_completed_io_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx kfence_freelist_lock irq_context: 0 (wq_completion)phy12 (work_completion)(&(&local->roc_work)->work) &local->mtx &meta->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx irq_context: 0 (wq_completion)phy6 
(work_completion)(&(&local->roc_work)->work) &local->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx nl_table_wait.lock irq_context: 0 cb_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx nl_table_wait.lock &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 delayed_uprobe_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: softirq &(&local->roc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &c->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &____s->seqcount irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &dentry->d_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem stock_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem inode_hash_lock &s->s_inode_list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem tk_core.seq.seqcount irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &sb->s_type->i_lock_key#31 &dentry->d_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_node_0 irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex cgroup_mutex.wait_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&hsr->announce_timer) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock genl_mutex &pn->l2tp_tunnel_idr_lock &c->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&(&local->roc_work)->work) &local->mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: softirq (&lapb->t1timer) &lapb->lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx kfence_freelist_lock irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &root->kernfs_iattr_rwsem &rq->__lock irq_context: softirq (&timer) rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &pcp->lock &zone->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx &rq->__lock &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy14 (work_completion)(&(&local->roc_work)->work) &local->mtx quarantine_lock irq_context: 0 cb_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#7 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex mrt_lock irq_context: 0 rtnl_mutex mrt_lock pool_lock#2 irq_context: 0 rtnl_mutex mrt_lock &dir->lock#2 irq_context: 0 rtnl_mutex &net->ipv4.ra_mutex irq_context: 0 &net->ipv4.ra_mutex irq_context: 0 
&rq->__lock cid_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &sdata->sec_mtx irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock irq_context: 0 &group->mark_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) &rq->__lock irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &base->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PHONET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rq->__lock &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &k->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &k->k_lock irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &n->list_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 rtnl_mutex slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 tcpv4_prot_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)events_power_efficient 
(work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) nfc_devlist_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &rdma_nl_types[idx].sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem pool_lock#2 
irq_context: 0 &rdma_nl_types[idx].sem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem &device->client_data_rwsem irq_context: 0 &rdma_nl_types[idx].sem &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem rlock-AF_NETLINK irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema fs_reclaim irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &____s->seqcount irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema stock_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &mm->page_table_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &resv_map->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema hugetlb_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema hugetlb_lock &____s->seqcount#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &xa->xa_lock#8 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &sb->s_type->i_lock_key#16 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &resv_map->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &resv_map->lock pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &xa->xa_lock#8 stock_lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &xa->xa_lock#8 pool_lock#2 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &c->lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema rcu_read_lock 
rcu_node_0 irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem &c->lock irq_context: 0 &rdma_nl_types[idx].sem &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 kfence_freelist_lock irq_context: 0 &pipe->mutex/1 &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 &f->f_lock irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 irq_context: 0 sb_writers#4 &pipe->rd_wait irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 &rdma_nl_types[idx].sem batched_entropy_u8.lock irq_context: 0 &rdma_nl_types[idx].sem kfence_freelist_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &ei->xattr_sem irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle 
&ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_list_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &____s->seqcount irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 rcu_read_lock 
&obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock btf_idr_lock &c->lock irq_context: 0 sk_lock-AF_ALG &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET batched_entropy_u32.lock irq_context: 0 rcu_read_lock &l->lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 pool_lock#2 irq_context: 0 sb_writers#4 &pipe->mutex/1 rcu_read_lock &new->fa_lock irq_context: 0 sb_writers#4 &pipe->mutex/1 rcu_read_lock &new->fa_lock &f->f_owner.lock irq_context: 0 sb_writers#4 &pipe->mutex/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &pipe->mutex/1 rcu_read_lock &rq->__lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&icsk->icsk_retransmit_timer) 
slock-AF_INET &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 tracepoints_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN irq_context: 0 sk_lock-AF_CAN slock-AF_CAN irq_context: 0 sk_lock-AF_CAN fs_reclaim irq_context: 0 sk_lock-AF_CAN fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_CAN &____s->seqcount#2 irq_context: 0 sk_lock-AF_CAN &____s->seqcount irq_context: 0 sk_lock-AF_CAN pool_lock#2 irq_context: 0 sk_lock-AF_CAN &c->lock irq_context: 0 sk_lock-AF_CAN &net->can.rcvlists_lock irq_context: 0 slock-AF_CAN irq_context: 0 &so->wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &net->can.rcvlists_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &cnet->ecache.dying_lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 
&sighand->siglock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 &f->f_lock fasync_lock pool_lock#2 irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key &____s->seqcount irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key pool_lock#2 irq_context: 0 &hugetlb_fault_mutex_table[i] irq_context: 0 &hugetlb_fault_mutex_table[i] &sb->s_type->i_lock_key#16 irq_context: 0 &hugetlb_fault_mutex_table[i] &sb->s_type->i_lock_key#16 &xa->xa_lock#8 irq_context: 0 &hugetlb_fault_mutex_table[i] &sb->s_type->i_lock_key#16 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 &hugetlb_fault_mutex_table[i] &sb->s_type->i_lock_key#16 &xa->xa_lock#8 pool_lock#2 irq_context: 0 &hugetlb_fault_mutex_table[i] &rq->__lock irq_context: 0 hugetlb_lock irq_context: 0 &resv_map->lock &obj_hash[i].lock irq_context: 0 &resv_map->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_ra_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex kfence_freelist_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC fs_reclaim irq_context: 0 nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC batched_entropy_u8.lock irq_context: 0 nlk_cb_mutex-GENERIC kfence_freelist_lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex irq_context: 0 nlk_cb_mutex-GENERIC pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC &c->lock irq_context: 0 nlk_cb_mutex-GENERIC &____s->seqcount#2 irq_context: 0 nlk_cb_mutex-GENERIC &____s->seqcount irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 
&sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC &n->list_lock irq_context: 0 nlk_cb_mutex-GENERIC &n->list_lock &c->lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &c->lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC rlock-AF_NETLINK irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC &obj_hash[i].lock irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#12 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_raw_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 (&timer.timer) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_done_commit irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &hugetlb_fault_mutex_table[i] &vma_lock->rw_sema rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &pipe->wr_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal 
rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &pipe->mutex/1 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &dentry->d_lock irq_context: 0 sb_writers tomoyo_ss irq_context: 0 sb_writers tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 sb_writers tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers tomoyo_ss &obj_hash[i].lock irq_context: 0 sb_writers tomoyo_ss &c->lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#8 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 lock#5 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &lruvec->lru_lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &sb->s_type->i_lock_key#5 &xa->xa_lock#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle irq_context: 0 sb_writers &sb->s_type->i_mutex_key#4 &info->lock key#9 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock lock#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#6 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit &p->pi_lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit &p->pi_lock &rq->__lock irq_context: 0 &journal->j_state_lock &journal->j_wait_done_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &journal->j_state_lock &journal->j_wait_transaction_locked &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem 
&ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &n->list_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ifibss->timer) irq_context: softirq (&ifibss->timer) &rdev->wiphy_work_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &cfs_rq->removed.lock irq_context: 0 &xa->xa_lock#8 batched_entropy_u8.lock irq_context: 0 &xa->xa_lock#8 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key &rq->__lock irq_context: 0 &mm->mmap_lock &vma_lock->rw_sema &hugetlbfs_i_mmap_rwsem_key &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &vma->vm_lock->lock &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock 
&obj_hash[i].lock irq_context: 0 &p->lock remove_cache_srcu rcu_node_0 irq_context: 0 &fq->mq_flush_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &fq->mq_flush_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &fq->mq_flush_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_state_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq rcu_read_lock &local->rx_path_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_LLC &c->lock irq_context: 0 sk_lock-AF_LLC wlock-AF_LLC irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh &____s->seqcount irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_LLC &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC &base->lock irq_context: 0 sk_lock-AF_LLC &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC &ei->socket.wq.wait irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &fq->mq_flush_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &c->lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC slock-AF_LLC &sk->sk_lock.wq irq_context: 0 slock-AF_LLC &sk->sk_lock.wq irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock irq_context: 0 
slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock irq_context: 0 sk_lock-AF_PPPOX chan_lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_node_0 irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX &obj_hash[i].lock irq_context: 0 rlock-AF_PPPOX irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sk_lock-AF_LLC quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &x->wait#26 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_LLC rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &list->lock#39 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_LLC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET irq_context: 0 rtnl_mutex sk_lock-AF_INET slock-AF_INET irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET &in_dev->mc_tomb_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET _xmit_ETHER irq_context: 0 rtnl_mutex sk_lock-AF_INET _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET rcu_read_lock &im->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex slock-AF_INET irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rlock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &im->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 
&sb->s_type->i_mutex_key#10 rtnl_mutex fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &in_dev->mc_tomb_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock &ul->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 slock-AF_LLC &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock rcu_read_lock key#10 irq_context: softirq (&map->gc) irq_context: softirq (&map->gc) &set->lock irq_context: softirq (&map->gc) &obj_hash[i].lock irq_context: softirq (&map->gc) &base->lock irq_context: softirq (&map->gc) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 fs_reclaim &rq->__lock irq_context: 0 &group->mark_mutex &n->list_lock irq_context: 0 &group->mark_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &sb->s_type->i_mutex_key#10 &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 (wq_completion)events &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 mode_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 (console_sem).lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock 
genl_mutex rtnl_mutex team->team_lock_key#7 console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 nl_table_wait.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#7 &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_lock_key#8 bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &____s->seqcount irq_context: 0 sb_writers#4 &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sk_lock-AF_LLC rcu_node_0 irq_context: softirq &fq->mq_flush_lock &x->wait#26 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#3 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &c->lock irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)kblockd (work_completion)(&(&q->requeue_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 sb_writers#4 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 sb_writers#4 jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh noop_qdisc.busylock irq_context: 0 sk_lock-AF_LLC rcu_read_lock_bh noop_qdisc.busylock noop_qdisc.q.lock irq_context: 0 sk_lock-AF_LLC &n->list_lock irq_context: 0 sk_lock-AF_LLC &n->list_lock &c->lock irq_context: 0 
rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_LLC &meta->lock irq_context: 0 sk_lock-AF_LLC kfence_freelist_lock irq_context: 0 rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 mode_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 (console_sem).lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#8 &n->list_lock &c->lock irq_context: softirq (&llc->ack_timer.timer) irq_context: softirq (&llc->ack_timer.timer) pool_lock#2 irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC irq_context: softirq 
(&llc->ack_timer.timer) slock-AF_LLC pool_lock#2 irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC &c->lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC wlock-AF_LLC irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh pool_lock#2 irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC &obj_hash[i].lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC &base->lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC &base->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_LLC &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: softirq (&llc->ack_timer.timer) &c->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &type->i_mutex_dir_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &c->lock irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_LLC llc_sap_list_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 mode_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 (console_sem).lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner irq_context: 0 cb_lock 
genl_mutex rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex team->team_lock_key#9 nl_table_wait.lock irq_context: softirq (&llc->ack_timer.timer) slock-AF_LLC rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 lock#3 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_LLC rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_LLC rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &list->lock#23 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC &list->lock#23 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &u->lock 
&ei->socket.wq.wait &p->pi_lock irq_context: 0 &u->lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &u->lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xn->hash_lock irq_context: 0 &xn->hash_lock &rq->__lock irq_context: 0 &xn->hash_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xn->hash_lock fs_reclaim irq_context: 0 &xn->hash_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xn->hash_lock &c->lock irq_context: 0 &xn->hash_lock pool_lock#2 irq_context: 0 &xn->hash_lock &est->lock irq_context: 0 &xn->hash_lock &est->lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock &est->lock &base->lock irq_context: 0 &xn->hash_lock &est->lock &base->lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock (&est->timer) irq_context: 0 &xn->hash_lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock &base->lock irq_context: 0 &xn->hash_lock &base->lock &obj_hash[i].lock irq_context: 0 &xn->hash_lock krc.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock fs_reclaim irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock &hdev->cmd_sync_work_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &hdev->unregister_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock pool_lock#2 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &list->lock#8 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &list->lock#7 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0 
(work_completion)(&hdev->cmd_sync_work) &hdev->req_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->req_wait_q irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &base->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &____s->seqcount irq_context: 0 sk_lock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &list->lock#23 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock key#22 irq_context: 0 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override pool_lock irq_context: 
0 cb_lock genl_mutex rtnl_mutex stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &sb->s_type->i_lock_key#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex &dir->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex k-sk_lock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex k-clock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex &xa->xa_lock#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex &fsnotify_mark_srcu irq_context: 0 &mm->mmap_lock &anon_vma->rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem uevent_sock_mutex quarantine_lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &xa->xa_lock#16 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &x->wait#9 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex gdp_mutex &k->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem 
&device->compat_devs_mutex bus_type_sem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex sysfs_symlink_target_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &dev->power.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex dpm_list_mtx irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#84 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex subsys mutex#84 &k->k_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &____s->seqcount#2 
irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex kfence_freelist_lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 slock-AF_INET &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &c->lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 raw_notifier_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_callback &x->wait#24 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock kfence_freelist_lock 
irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex lweventlist_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex lweventlist_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &br->hash_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &pdata->netdev_lock irq_context: 0 &sb->s_type->i_mutex_key#10 raw_notifier_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events deferred_process_work rtnl_mutex quarantine_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &list->lock#14 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN slock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN clock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex slock-AF_CAN irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &c->lock irq_context: softirq rcu_read_lock hwsim_radio_lock init_task.mems_allowed_seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock (&timer.timer) irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion 
(work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock (console_sem).lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock console_lock console_srcu console_owner irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[3] irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock rcu_node_0 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock fs_reclaim irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock pool_lock#2 irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock &c->lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock tk_core.seq.seqcount irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock rlock-AF_BLUETOOTH irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock hci_sk_list.lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock &obj_hash[i].lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock hci_dev_list_lock irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock wlock-AF_BLUETOOTH irq_context: 0 (wq_completion)hci0 (work_completion)(&hdev->cmd_sync_work) &hdev->req_lock &hdev->lock &dir->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 
(wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock irq_context: 0 pernet_ops_rwsem krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &xa->xa_lock#16 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem quarantine_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kfence_freelist_lock irq_context: softirq (&tw->tw_timer) irq_context: softirq (&tw->tw_timer) &hashinfo->ehash_locks[i] irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq (&tw->tw_timer) &obj_hash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &zone->lock irq_context: 0 pernet_ops_rwsem &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 pernet_ops_rwsem subsys mutex#84 irq_context: 0 pernet_ops_rwsem subsys mutex#84 &k->k_lock irq_context: 0 pernet_ops_rwsem subsys mutex#84 &k->k_lock klist_remove_lock irq_context: 0 pernet_ops_rwsem &x->wait#9 irq_context: 0 pernet_ops_rwsem dpm_list_mtx irq_context: 0 pernet_ops_rwsem &dev->power.lock irq_context: 0 pernet_ops_rwsem deferred_probe_mutex irq_context: 0 pernet_ops_rwsem device_links_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 pernet_ops_rwsem 
gdp_mutex irq_context: 0 pernet_ops_rwsem &device->unregistration_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: softirq rcu_read_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex uevent_sock_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex uevent_sock_mutex &meta->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock 
&type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock pgd_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock stock_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock pcpu_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock percpu_counters_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock pcpu_lock stock_lock irq_context: 0 &x->wait#22 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 lock pidmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#5 
fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex irq_context: 0 bpf_dispatcher_xdp.mutex pack_mutex irq_context: 0 bpf_dispatcher_xdp.mutex fs_reclaim irq_context: 0 bpf_dispatcher_xdp.mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_dispatcher_xdp.mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_dispatcher_xdp.mutex &c->lock irq_context: 0 bpf_dispatcher_xdp.mutex pool_lock#2 irq_context: 0 bpf_dispatcher_xdp.mutex free_vmap_area_lock irq_context: 0 bpf_dispatcher_xdp.mutex vmap_area_lock irq_context: 0 bpf_dispatcher_xdp.mutex &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_dispatcher_xdp.mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 bpf_dispatcher_xdp.mutex init_mm.page_table_lock irq_context: 0 bpf_dispatcher_xdp.mutex bpf_lock irq_context: 0 bpf_dispatcher_xdp.mutex text_mutex irq_context: 0 bpf_dispatcher_xdp.mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_dispatcher_xdp.mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem 
&device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_AX25 irq_context: 0 sk_lock-AF_AX25 slock-AF_AX25 irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock irq_context: 0 slock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 slock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 clock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 ax25_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 &list->lock#40 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 rlock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_AX25 wlock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_AX25 irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 rcu_read_lock rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work rcu_node_0 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work quarantine_lock irq_context: 0 &disk->open_mutex &lock->wait_lock irq_context: 0 &disk->open_mutex rcu_read_lock &rq->__lock irq_context: 0 &disk->open_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex free_vmap_area_lock irq_context: 0 rtnl_mutex vmap_area_lock irq_context: 0 rtnl_mutex &local->sta_mtx irq_context: 0 rtnl_mutex purge_vmap_area_lock irq_context: 0 rtnl_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &xa->xa_lock#16 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem 
&root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &zone->lock &____s->seqcount irq_context: 0 &tsk->futex_exit_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->wiphy_work_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rdev->wiphy_work_lock rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rdev->wiphy_work_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->ampdu_mlme.mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->ampdu_mlme.work) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events 
(work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->key_mtx irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &xa->xa_lock#8 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &dentry->d_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &fsnotify_mark_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &sb->s_type->i_lock_key#7 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &s->s_inode_list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx 
&xa->xa_lock#8 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx mount_lock mount_lock.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &local->active_txq_lock[i] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx (work_completion)(&sta->drv_deliver_wk) irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &fq->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&wdev->mtx rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &list->lock#15 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &meta->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#84 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#84 &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#84 &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#84 &k->k_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem subsys mutex#84 &k->k_lock klist_remove_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &x->wait#9 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dpm_list_mtx irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &dev->power.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem deferred_probe_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem device_links_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem gdp_mutex irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->unregistration_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx lweventlist_lock &c->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock pool_lock#2 irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx kfence_freelist_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &meta->lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx &wdev->mtx &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock 
&pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &lock->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem &x->wait#10 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); 
(void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rlock-AF_NETLINK irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[1] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &rnp->exp_wq[0] irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events fqdir_free_work &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 sk_lock-AF_SMC irq_context: 0 sk_lock-AF_SMC slock-AF_SMC irq_context: 0 sk_lock-AF_SMC &smc->clcsock_release_lock irq_context: 0 sk_lock-AF_SMC &smc->clcsock_release_lock &net->smc.mutex_fback_rsn irq_context: 0 sk_lock-AF_SMC &smc->clcsock_release_lock k-clock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET fs_reclaim irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &ul->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET batched_entropy_u16.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 
sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &base->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &ei->socket.wq.wait irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock fastopen_seqlock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock fastopen_seqlock fastopen_seqlock.seqcount irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET k-clock-AF_INET irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET k-clock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET k-clock-AF_INET rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_SMC k-slock-AF_INET irq_context: 0 sk_lock-AF_SMC &rq->__lock irq_context: 0 sk_lock-AF_SMC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events (work_completion)(&w->w) &rq->__lock irq_context: 0 slock-AF_SMC irq_context: 0 k-sk_lock-AF_INET &mm->mmap_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex __ip_vs_mutex irq_context: 0 cb_lock genl_mutex __ip_vs_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex __ip_vs_mutex pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET &____s->seqcount#2 irq_context: 0 nf_ct_proto_mutex nf_hook_mutex irq_context: 0 nf_ct_proto_mutex nf_hook_mutex fs_reclaim irq_context: 0 nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nf_ct_proto_mutex nf_hook_mutex stock_lock irq_context: 0 nf_ct_proto_mutex nf_hook_mutex pool_lock#2 irq_context: 0 nf_ct_proto_mutex cpu_hotplug_lock irq_context: 0 nf_ct_proto_mutex &obj_hash[i].lock irq_context: 0 nf_ct_proto_mutex &rq->__lock irq_context: 0 nf_ct_proto_mutex pool_lock#2 irq_context: 0 nf_conntrack_mutex &nf_conntrack_locks[i] irq_context: 0 nf_conntrack_mutex &rq->__lock irq_context: 0 nf_conntrack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 k-clock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 k-clock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 k-clock-AF_INET rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 ebt_mutex &table->lock#3 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_SMC 
k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nf_ct_proto_mutex nf_hook_mutex cpu_hotplug_lock irq_context: 0 nf_ct_proto_mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 
&sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx remove_cache_srcu &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pcpu_lock stock_lock irq_context: 0 ppp_mutex &n->list_lock irq_context: 0 ppp_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 ppp_mutex &rq->__lock irq_context: 0 ppp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex ppp_mutex.wait_lock irq_context: 0 ppp_mutex.wait_lock irq_context: 0 ppp_mutex &ppp->wlock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &c->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock fastopen_seqlock.seqcount irq_context: 0 &ppp->wlock &list->lock#33 irq_context: 0 &ppp->wlock &pf->rwait irq_context: 0 &pf->rwait irq_context: 0 &ppp->rlock irq_context: 0 &ppp->wlock &pf->rwait &p->pi_lock irq_context: 0 &ppp->wlock &pf->rwait &p->pi_lock &rq->__lock irq_context: 0 &ppp->wlock &pf->rwait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock fs_reclaim irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock pool_lock#2 irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &dir->lock irq_context: 0 sk_lock-AF_PPPOX &ps->sk_lock &pn->all_channels_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->chan_sem &pch->downl irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pch->upl irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->all_channels_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX 
&pf->rwait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &list->lock#33 irq_context: 0 (wq_completion)l2tp (work_completion)(&tunnel->del_work) &pn->l2tp_tunnel_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 clock-AF_INET6 irq_context: softirq rcu_callback clock-AF_INET6 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->softif_vlan_list_lock kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &meta->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock kfence_freelist_lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_RXRPC irq_context: 0 
sk_lock-AF_RXRPC slock-AF_RXRPC irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &c->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &n->list_lock &c->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex pool_lock#2 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex crngs.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex stock_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &sb->s_type->i_lock_key#8 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &dir->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex cpu_hotplug_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex kthread_create_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex &x->wait#22 irq_context: 0 sk_lock-AF_PPPOX &dir->lock irq_context: 0 sk_lock-AF_PPPOX &pn->all_channels_lock irq_context: 0 sk_lock-AF_RXRPC &local->services_lock irq_context: 0 slock-AF_RXRPC irq_context: 0 sk_lock-AF_PPPOX &mm->mmap_lock irq_context: 0 sk_lock-AF_PPPOX rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_PPPOX rcu_read_lock_bh pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX &pn->hash_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX rlock-AF_PPPOX irq_context: 0 sk_lock-AF_RXRPC fs_reclaim irq_context: 0 sk_lock-AF_RXRPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_RXRPC pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &local->services_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rx->incoming_lock irq_context: 0 k-clock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 &x->wait irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#4 pool_lock#2 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 k-sk_lock-AF_INET &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex __ip_vs_mutex irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 &hashinfo->ehash_locks[i] irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &dir->lock irq_context: 0 dev_map_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock (&timer.timer) irq_context: 0 (wq_completion)cfg80211 (work_completion)(&rdev->event_work) &rdev->wiphy.mtx rcu_node_0 irq_context: 0 cb_lock rcu_node_0 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock 
irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq (&tw->tw_timer) &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq (&tw->tw_timer) stock_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_SMC k-sk_lock-AF_INET batched_entropy_u16.lock crngs.lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM &mm->mmap_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex 
batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock k-sk_lock-AF_INET rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &fq->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &u->peer_wait &p->pi_lock irq_context: 0 &u->peer_wait &p->pi_lock &rq->__lock irq_context: 0 &u->peer_wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_node_0 irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &meta->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem 
&rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &dev->power.lock irq_context: 0 &audit_cmd_mutex.lock tk_core.seq.seqcount irq_context: 0 &audit_cmd_mutex.lock &obj_hash[i].lock irq_context: 0 &audit_cmd_mutex.lock &list->lock irq_context: 0 &audit_cmd_mutex.lock kauditd_wait.lock irq_context: 0 &audit_cmd_mutex.lock kauditd_wait.lock &p->pi_lock irq_context: 0 &audit_cmd_mutex.lock kauditd_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &audit_cmd_mutex.lock kauditd_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &audit_cmd_mutex.lock &rq->__lock irq_context: 0 &audit_cmd_mutex.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC batched_entropy_u8.lock irq_context: 0 sk_lock-AF_TIPC kfence_freelist_lock irq_context: 0 sk_lock-AF_TIPC &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) quarantine_lock irq_context: 0 sk_lock-AF_TIPC &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock irq_context: 0 rtnl_mutex mrt_lock#2 irq_context: 0 elock-AF_INET6 irq_context: 0 sk_lock-AF_TIPC quarantine_lock irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu pool_lock#2 irq_context: 0 &list->lock#41 irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#7 irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 &smc->clcsock_release_lock k-slock-AF_INET irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 lock prog_idr_lock 
&n->list_lock irq_context: 0 lock prog_idr_lock &n->list_lock &c->lock irq_context: 0 nlk_cb_mutex-NETFILTER irq_context: 0 nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nlk_cb_mutex-NETFILTER &rq->__lock irq_context: 0 nlk_cb_mutex-NETFILTER &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock pool_lock#2 irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: 0 sk_lock-AF_QIPCRTR irq_context: 0 sk_lock-AF_QIPCRTR slock-AF_QIPCRTR irq_context: 0 sk_lock-AF_QIPCRTR fs_reclaim irq_context: 0 sk_lock-AF_QIPCRTR fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_QIPCRTR qrtr_ports.xa_lock irq_context: 0 sk_lock-AF_QIPCRTR qrtr_ports.xa_lock &c->lock irq_context: 0 sk_lock-AF_QIPCRTR qrtr_ports.xa_lock pool_lock#2 irq_context: 0 slock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR slock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR clock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR qrtr_node_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rlock-AF_QIPCRTR irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR qrtr_ports.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR qrtr_ports.xa_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR qrtr_ports.xa_lock pool_lock#2 irq_context: 0 (wq_completion)qrtr_ns_handler irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) fs_reclaim irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) pool_lock#2 irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) k-sk_lock-AF_QIPCRTR 
irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) k-sk_lock-AF_QIPCRTR k-slock-AF_QIPCRTR irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) k-sk_lock-AF_QIPCRTR rlock-AF_QIPCRTR irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) k-sk_lock-AF_QIPCRTR &obj_hash[i].lock irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) k-sk_lock-AF_QIPCRTR pool_lock#2 irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) k-slock-AF_QIPCRTR irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) &c->lock irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) nodes.xa_lock irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) nodes.xa_lock pool_lock#2 irq_context: 0 (wq_completion)qrtr_ns_handler (work_completion)(&qrtr_ns.work) &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_QIPCRTR &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_QIPCRTR irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_node_0 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &dir->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-NETFILTER &n->list_lock irq_context: 0 nlk_cb_mutex-NETFILTER 
&n->list_lock &c->lock irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock rcu_node_0 irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock &rq->__lock irq_context: 0 nlk_cb_mutex-NETFILTER rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_TIPC fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER ip_set_ref_lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &rq->__lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nfnl_subsys_ipset nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &n->list_lock &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock ip6_fl_lock irq_context: 0 sk_lock-AF_INET6 ip6_sk_fl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_sk_fl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 ip6_fl_lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &____s->seqcount irq_context: 0 sb_writers#3 oom_adj_mutex oom_adj_mutex.wait_lock irq_context: 0 sb_writers#3 oom_adj_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&w->w) kfence_freelist_lock irq_context: 0 ebt_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 ebt_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_persistent_keepalive) &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key#4 bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &c->lock irq_context: 0 br_ioctl_mutex 
rtnl_mutex &dir->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex deferred_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex (console_sem).lock irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 br_ioctl_mutex rtnl_mutex net_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &tn->lock irq_context: 0 br_ioctl_mutex rtnl_mutex target_list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock _xmit_ETHER irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 irq_context: 0 br_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key#2/1 rcu_read_lock _xmit_ETHER irq_context: 0 br_ioctl_mutex rtnl_mutex &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 
br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle lock#4 &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &dev_addr_list_lock_key#2/1 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex (switchdev_blocking_notif_chain).rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &____s->seqcount#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock rhashtable_bucket irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &dir->lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock deferred_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock (console_sem).lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock console_lock console_srcu console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock 
console_lock console_srcu console_owner irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &____s->seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &base->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &base->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &br->multicast_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &br->multicast_lock &base->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex &rq->__lock irq_context: 0 br_ioctl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#8 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#8 tomoyo_ss &meta->lock irq_context: 0 &p->lock pgd_lock irq_context: 0 &p->lock rcu_read_lock pool_lock#2 irq_context: 0 &p->lock &obj_hash[i].lock irq_context: 0 &p->lock key irq_context: 0 &p->lock pcpu_lock irq_context: 0 &p->lock percpu_counters_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &sbi->s_orphan_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock &sbi->s_es_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_es_lock pool_lock#2 irq_context: 0 
sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_wait_done_commit irq_context: 0 nfnl_subsys_ctnetlink nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &hashinfo->ehash_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 
rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &c->lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock rcu_node_0 irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock irq_context: 0 &iint->mutex ima_extend_list_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &tfile->socket.wq.wait irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &xt[i].mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &mm->mmap_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sb_writers#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#3 irq_context: softirq (&n->timer) &dir->lock#2 irq_context: softirq (&n->timer) &ul->lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: softirq (&req->rsk_timer) irq_context: softirq (&req->rsk_timer) &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rcu_read_lock &tfile->socket.wq.wait &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle 
&ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem quarantine_lock irq_context: 0 lweventlist_lock &obj_hash[i].lock irq_context: 0 &dir->lock#2 &meta->lock irq_context: 0 &dir->lock#2 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem key#3 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &dd->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 base_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 base_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 clock-AF_ISDN irq_context: 0 sk_lock-AF_INET fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock irq_context: 0 raw_lock irq_context: 0 &sb->s_type->i_mutex_key#10 raw_lock irq_context: 0 sb_writers#8 tomoyo_ss batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_lock_key rcu_read_lock &dentry->d_lock irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_lock_key &dentry->d_lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tbl->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 
&sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PHONET &pnsocks.lock irq_context: 0 sk_lock-AF_PHONET resource_mutex irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_node_0 irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PHONET &obj_hash[i].lock irq_context: 0 clock-AF_PHONET irq_context: 0 rlock-AF_PHONET irq_context: 0 &list->lock#35 irq_context: 0 sk_lock-AF_PHONET rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock genl_mutex rtnl_mutex &sdata->sec_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &sdata->sec_mtx &sec->lock irq_context: 0 &group->mark_mutex rcu_read_lock rcu_node_0 irq_context: 0 &group->mark_mutex rcu_read_lock &rq->__lock irq_context: 0 &group->mark_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &lock->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pgd_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 key irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 percpu_counters_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock ovs_mutex irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_node_0 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&n->timer) rcu_read_lock &ndev->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) 
k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&n->timer) stock_lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock &obj_hash[i].lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock &base->lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock &base->lock &obj_hash[i].lock irq_context: softirq net/ipv6/ip6_flowlabel.c:47 ip6_fl_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 mapping.invalidate_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock 
&tbl->lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock batched_entropy_u32.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_node_0 irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_LOOPBACK#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 rtnl_mutex &newf->file_lock irq_context: 0 rtnl_mutex lock link_idr_lock irq_context: 0 rtnl_mutex lock link_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#15 irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#15 &dentry->d_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex text_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex &obj_hash[i].lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 
rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &r->consumer_lock#3 irq_context: 0 rtnl_mutex &r->consumer_lock#4 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq rcu_read_lock &sch->q.lock irq_context: softirq rcu_read_lock &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock &list->lock#5 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex.wait_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock 
rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock clock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock reuseport_ida.xa_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 hrtimer_bases.lock irq_context: 0 sk_lock-AF_INET6 hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
&sb->s_type->i_mutex_key#8 kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &meta->lock irq_context: softirq rcu_callback reuseport_ida.xa_lock irq_context: 0 sk_lock-AF_INET6 slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq slock-AF_INET6 pool_lock#2 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq slock-AF_INET6 hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &____s->seqcount 
irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 sk_lock-AF_INET6 &sd->defer_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem quarantine_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->unregistration_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->unregistration_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &h->lhash2[i].lock reuseport_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &mm->mmap_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex key#25 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh &list->lock#5 
irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 integrity_iint_lock irq_context: 0 sb_writers#4 &iint->mutex &ei->xattr_sem irq_context: 0 sb_writers#4 &iint->mutex mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex fs_reclaim irq_context: 0 sb_writers#4 &iint->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock mount_lock.seqcount irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex &c->lock irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work &dir->lock quarantine_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem lock#5 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock mapping.invalidate_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &xa->xa_lock#8 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &____s->seqcount#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem batched_entropy_u8.lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex &n->list_lock irq_context: 0 sb_writers#4 &iint->mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &iint->mutex &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &____s->seqcount#2 irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &____s->seqcount irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex ima_extend_list_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_node_0 irq_context: 0 mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 mapping.invalidate_lock &____s->seqcount irq_context: 0 mapping.invalidate_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock stock_lock irq_context: 0 
mapping.invalidate_lock &xa->xa_lock#8 irq_context: 0 mapping.invalidate_lock lock#4 irq_context: 0 mapping.invalidate_lock &xa->xa_lock#8 stock_lock irq_context: 0 mapping.invalidate_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 mapping.invalidate_lock &rq->__lock irq_context: 0 mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock irq_context: 0 &mm->mmap_lock mapping.invalidate_lock irq_context: 0 mapping.invalidate_lock &folio_wait_table[i] irq_context: 0 mapping.invalidate_lock &ei->i_es_lock irq_context: 0 mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock &dd->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 mapping.invalidate_lock rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ei->i_raw_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock &q->requeue_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &fq->mq_flush_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_read_lock &list->lock#16 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem quarantine_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock key#10 irq_context: 0 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 mapping.invalidate_lock &obj_hash[i].lock irq_context: 0 mapping.invalidate_lock &c->lock irq_context: 0 mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &hctx->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock 
k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem &____s->seqcount irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &n->list_lock &c->lock irq_context: 0 sb_writers#4 sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) quarantine_lock irq_context: 0 &mm->mmap_lock sb_writers#4 jbd2_handle &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle hrtimer_bases.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_wait_updates &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle 
&ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &sem->wait_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &journal->j_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &journal->j_revoke_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &bgl->locks[i].lock &sbi->s_md_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &bgl->locks[i].lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &bgl->locks[i].lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 lock#4 irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu quarantine_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle 
remove_cache_srcu &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ret->b_state_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 sb_writers#4 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu 
pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &(ei->i_block_reservation_lock) key#14 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem/1 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8/4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem 
&ei->i_data_sem/1 &bgl->locks[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 mapping.invalidate_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 mapping.invalidate_lock rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 pgd_lock irq_context: 0 sb_writers#4 rcu_read_lock &____s->seqcount irq_context: 0 sb_writers#4 key irq_context: 0 sb_writers#4 pcpu_lock irq_context: 0 sb_writers#4 percpu_counters_lock irq_context: 0 sb_writers#4 pcpu_lock stock_lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &cfs_rq->removed.lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle lock#4 &lruvec->lru_lock irq_context: softirq rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock &sta->rate_ctrl_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx quarantine_lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock reuseport_lock irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock reuseport_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &table->hash[i].lock reuseport_lock reuseport_ida.xa_lock irq_context: 0 sk_lock-AF_INET6 free_vmap_area_lock irq_context: 0 sk_lock-AF_INET6 vmap_area_lock irq_context: 0 sk_lock-AF_INET6 pcpu_alloc_mutex irq_context: 0 sk_lock-AF_INET6 pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_INET6 pack_mutex irq_context: 0 sk_lock-AF_INET6 text_mutex irq_context: 0 sk_lock-AF_INET6 text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET6 &fp->aux->used_maps_mutex irq_context: 0 sk_lock-AF_INET6 reuseport_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock reuseport_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock reuseport_lock clock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock reuseport_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &table->hash[i].lock reuseport_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 (wq_completion)events_highpri (work_completion)(flush) irq_context: 0 rtnl_mutex cpu_hotplug_lock (work_completion)(flush) irq_context: 0 rtnl_mutex cpu_hotplug_lock &x->wait#10 irq_context: 0 rtnl_mutex cpu_hotplug_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_highpri (work_completion)(flush) &list->lock#5 irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_highpri (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stopper->lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC slock-AF_UNSPEC irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC free_vmap_area_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC vmap_area_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &____s->seqcount irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC pack_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC batched_entropy_u32.lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &fp->aux->used_maps_mutex irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &fp->aux->used_maps_mutex &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_UNSPEC &fp->aux->used_maps_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex slock-AF_UNSPEC irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[3] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock 
&obj_hash[i].lock pool_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &ul->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key krc.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&(&slave->notify_work)->work) irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_IPGRE krc.lock irq_context: 0 napi_hash_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &ei->i_es_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &ei->i_es_lock key#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &mapping->private_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &meta->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex kfence_freelist_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 &lruvec->lru_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#5 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &lruvec->lru_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &sb->s_type->i_lock_key#3 &xa->xa_lock#8 &obj_hash[i].lock pool_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &stopper->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &stop_pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock 
batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock tk_core.seq.seqcount irq_context: 0 &rdma_nl_types[idx].sem &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock &pdata->netdev_lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock rtnl_mutex irq_context: 0 &rdma_nl_types[idx].sem &pdata->netdev_lock irq_context: 0 bpf_stats_enabled_mutex &c->lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.gp_wq irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.gp_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#3 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &srv->idr_lock irq_context: 0 sk_lock-AF_TIPC &srv->idr_lock &c->lock irq_context: 0 sk_lock-AF_TIPC &srv->idr_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 sk_lock-AF_TIPC &con->sub_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &srv->idr_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &con->outqueue_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_send irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) irq_context: 0 
(wq_completion)tipc_send (work_completion)(&con->swork) &con->outqueue_lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) pool_lock#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &list->lock#42 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &list->lock#42 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->outqueue_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &con->outqueue_lock pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) &list->lock#23 irq_context: 0 (wq_completion)tipc_send (work_completion)(&con->swork) slock-AF_TIPC &list->lock#23 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &list->lock#19 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &srv->idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock &tn->nametbl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock &tn->nametbl_lock &service->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->sub_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &con->outqueue_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &srv->idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &con->outqueue_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock 
&sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock krc.lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &srv->idr_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &srv->idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pgd_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem stock_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem key irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pcpu_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem percpu_counters_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem pcpu_lock stock_lock irq_context: 0 rtnl_mutex mrt_lock#2 pool_lock#2 irq_context: 0 rtnl_mutex mrt_lock#2 &dir->lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &c->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex rcu_read_lock 
nl_table_lock irq_context: 0 rtnl_mutex rcu_read_lock nl_table_wait.lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 rtnl_mutex _xmit_ETHER/2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &____s->seqcount#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex (&hsr->prune_timer) irq_context: 0 rtnl_mutex (&hsr->announce_timer) irq_context: 0 rtnl_mutex &dentry->d_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &xa->xa_lock#8 irq_context: 0 rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 rtnl_mutex &fsnotify_mark_srcu irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#7 irq_context: 0 rtnl_mutex &s->s_inode_list_lock irq_context: 0 rtnl_mutex &xa->xa_lock#8 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 krc.lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem &rxe->usdev_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_node_0 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 fs_reclaim irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 &c->lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 &n->list_lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 &rq->__lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 pool_lock#2 irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock genl_mutex team->team_lock_key#3 rlock-AF_NETLINK irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER/2 pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 kfence_freelist_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[0] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &cfs_rq->removed.lock irq_context: 0 proto_tab_lock &c->lock irq_context: 0 sk_lock-AF_NFC irq_context: 0 sk_lock-AF_NFC slock-AF_NFC irq_context: 0 sk_lock-AF_NFC &k->list_lock irq_context: 0 sk_lock-AF_NFC &k->k_lock irq_context: 0 slock-AF_NFC irq_context: 0 &xa->xa_lock#8 &n->list_lock irq_context: 0 &xa->xa_lock#8 &n->list_lock &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &____s->seqcount irq_context: 0 rtnl_mutex &c->lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &c->lock batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex &c->lock kfence_freelist_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &journal->j_list_lock key#15 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &cfs_rq->removed.lock irq_context: softirq &x->wait#26 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->routes.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 slock-AF_INET6 elock-AF_INET6 irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 lock#4 &obj_hash[i].lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) percpu_counters_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex 
rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex rcu_read_lock mfc_unres_lock irq_context: 0 rtnl_mutex rcu_read_lock mfc_unres_lock#2 irq_context: 0 rtnl_mutex _xmit_ETHER &n->list_lock irq_context: 0 rtnl_mutex _xmit_ETHER &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &br->lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex _xmit_NETROM#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 rtnl_mutex &this->info_list_lock irq_context: 0 &p->lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 &____s->seqcount irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &mapping->private_lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 cb_lock rtnl_mutex &dev->power.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex &app->lock irq_context: 0 rtnl_mutex (&app->join_timer) irq_context: 0 rtnl_mutex (&app->periodic_timer) irq_context: 0 rtnl_mutex &list->lock#11 irq_context: 0 rtnl_mutex (&app->join_timer)#2 irq_context: 0 rtnl_mutex &app->lock#2 irq_context: 0 rtnl_mutex &list->lock#12 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER krc.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &mm->mmap_lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock 
irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 kn->active#15 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->forw_bat_list_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &x->wait#27 irq_context: 0 &sb->s_type->i_mutex_key#9 &xa->xa_lock#8 irq_context: 0 &sb->s_type->i_mutex_key#9 &fsnotify_mark_srcu irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock &x->wait#27 irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock &x->wait#27 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock &x->wait#27 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 sysctl_lock &x->wait#27 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &dentry->d_lock &wq#2 irq_context: 0 rtnl_mutex rcu_read_lock sysctl_lock irq_context: 0 rtnl_mutex rcu_read_lock &sb->s_type->i_lock_key#23 irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#23 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sysctl_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sysctl_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sysctl_lock krc.lock irq_context: 0 rtnl_mutex 
_xmit_PHONET_PIPE irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock &____s->seqcount irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &data->mutex irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock 
pool_lock#2 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock &c->lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock &list->lock#16 irq_context: softirq &(&conn->disc_work)->timer irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&conn->disc_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) pool_lock#2 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#7 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci2#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#7 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci0#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#7 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) &list->lock#7 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) kfence_freelist_lock irq_context: 0 (wq_completion)hci1#2 
(work_completion)(&(&conn->disc_work)->work) &list->lock#7 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)hci1#2 (work_completion)(&(&conn->disc_work)->work) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_writers#4 &n->list_lock irq_context: 0 &mm->mmap_lock sb_writers#4 &n->list_lock &c->lock irq_context: 0 defrag6_mutex irq_context: 0 cb_lock genl_mutex &ht->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock (&req->rsk_timer) irq_context: 0 sk_lock-AF_TIPC &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 (wq_completion)tipc_send#2 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &con->outqueue_lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &list->lock#42 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC &list->lock#42 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &con->outqueue_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC &tipc_net(net)->bclock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &list->lock#23 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &c->lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &c->lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &____s->seqcount#2 irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &____s->seqcount irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &n->list_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &service->lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &c->lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &rq->__lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_send#2 
(work_completion)(&con->swork) slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC &____s->seqcount#2 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) slock-AF_TIPC &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &service->lock &sub->lock &c->lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &____s->seqcount#2 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &____s->seqcount irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tipc_net(net)->bclock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &con->outqueue_lock pool_lock#2 irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &srv->idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send#2 (work_completion)(&con->swork) &srv->idr_lock pool_lock#2 irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &tsk->futex_exit_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET &____s->seqcount irq_context: 0 rtnl_mutex sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET 
slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 nf_sockopt_mutex nf_sockopt_mutex.wait_lock irq_context: 0 &group->notification_waitq &p->pi_lock irq_context: 0 nf_sockopt_mutex.wait_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET &f->f_owner.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock krc.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &obj_hash[i].lock pool_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock 
&sighand->siglock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &sighand->siglock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &sighand->siglock stock_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &sighand->siglock pool_lock#2 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex text_mutex.wait_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &p->pi_lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock jump_label_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &xs->mutex umem_ida.xa_lock irq_context: 0 &xs->mutex &rq->__lock irq_context: 0 &xs->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xs->mutex &mm->mmap_lock &____s->seqcount irq_context: 0 &xs->mutex &mm->mmap_lock pool_lock#2 irq_context: 0 &xs->mutex &mm->mmap_lock stock_lock irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &n->list_lock &c->lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page)#2 key irq_context: 0 &xs->mutex &mm->mmap_lock &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex &pernet->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &macvlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 pool_lock#2 irq_context: 0 (wq_completion)bond5 irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key#3/2 krc.lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex flowtable_lock &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ipvlan->addrs_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#3/2 krc.lock irq_context: 0 rtnl_mutex (work_completion)(&port->wq) irq_context: 0 rtnl_mutex (work_completion)(&port->wq) &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&port->wq) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &n->list_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) console_owner irq_context: 0 (wq_completion)hci4#2 (work_completion)(&(&hdev->cmd_timer)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &dentry->d_lock &dentry->d_lock/1 
&lru->node[i].lock rcu_read_lock &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock_bh &zone->lock irq_context: 0 rcu_read_lock_bh &____s->seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&umem->work) irq_context: 0 (wq_completion)events (work_completion)(&umem->work) umem_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &lruvec->lru_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6 irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock 
irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 
(wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx &local->queue_stop_reason_lock irq_context: 0 (wq_completion)phy11 (work_completion)(&(&local->roc_work)->work) &local->mtx rcu_read_lock &fq->lock irq_context: 0 &xs->mutex &____s->seqcount#2 irq_context: 0 &xs->mutex &mm->mmap_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &xs->mutex &mm->mmap_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 tk_core.seq.seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &obj_hash[i].lock irq_context: 0 
pernet_ops_rwsem rtnl_mutex smc_ib_devices.mutex &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 sctp_assocs_id_lock &obj_hash[i].lock pool_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex text_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_node_0 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &sem->wait_lock irq_context: 0 &xs->mutex &p->pi_lock irq_context: 0 &xs->mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { 
const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET krc.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET krc.lock &base->lock irq_context: 0 sk_lock-AF_INET krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond7 irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &x->wait#10 irq_context: 0 (wq_completion)bond7 (work_completion)(&barr->work) irq_context: 0 (wq_completion)bond7 (work_completion)(&barr->work) &x->wait#10 irq_context: 0 (wq_completion)bond7 (work_completion)(&barr->work) &x->wait#10 &p->pi_lock irq_context: 0 (wq_completion)bond7 (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond7 (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond7 (work_completion)(&barr->work) &rq->__lock irq_context: 0 (wq_completion)bond7 (work_completion)(&barr->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET sctp_assocs_id_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &____s->seqcount irq_context: softirq (&n->timer) &n->list_lock irq_context: softirq (&n->timer) &n->list_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex &pcp->lock &zone->lock irq_context: 0 &xs->mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rcu_state.expedited_wq &p->pi_lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem devices_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem devices_rwsem.wait_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8 irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh icmp_global.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh icmp_global.lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock 
rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem devices.xa_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem (&timer.timer) irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock rtnl_mutex.wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rxe->usdev_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &sem->wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem batched_entropy_u8.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu quarantine_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem remove_cache_srcu pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &xa->xa_lock#17 &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem 
fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex &c->lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem clients_rwsem &device->client_data_rwsem &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &____s->seqcount#8 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 batched_entropy_u32.lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex k-slock-AF_INET6 irq_context: 0 sk_lock-AF_RXRPC &rx->call_lock irq_context: 0 sk_lock-AF_RXRPC (rxrpc_call_limiter).lock irq_context: 0 sk_lock-AF_RXRPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_RXRPC tk_core.seq.seqcount irq_context: 0 sk_lock-AF_RXRPC &call->waitq irq_context: 0 sk_lock-AF_RXRPC &call->user_mutex irq_context: 0 sk_lock-AF_RXRPC &call->user_mutex &rx->call_lock irq_context: 0 sk_lock-AF_RXRPC &call->user_mutex &rxnet->call_lock irq_context: 0 sk_lock-AF_RXRPC &call->user_mutex slock-AF_RXRPC irq_context: 0 &call->user_mutex irq_context: 0 &call->user_mutex slock-AF_RXRPC irq_context: 0 &call->user_mutex fs_reclaim irq_context: 0 &call->user_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
&call->user_mutex pool_lock#2 irq_context: 0 &call->user_mutex &rq->__lock irq_context: 0 &call->user_mutex tk_core.seq.seqcount irq_context: 0 &call->user_mutex &rxnet->peer_hash_lock irq_context: 0 &call->user_mutex &local->client_bundles_lock irq_context: 0 &call->user_mutex &local->client_call_lock irq_context: 0 &call->user_mutex &p->pi_lock irq_context: 0 &call->user_mutex &p->pi_lock &rq->__lock irq_context: 0 &call->user_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &local->client_call_lock irq_context: 0 &call->user_mutex &call->waitq irq_context: 0 &call->waitq irq_context: 0 &call->user_mutex &call->tx_lock irq_context: 0 &call->user_mutex &local->lock irq_context: 0 &local->lock irq_context: 0 &peer->lock irq_context: 0 &call->tx_lock irq_context: 0 rcu_read_lock &rx->recvmsg_lock irq_context: 0 (&call->timer) irq_context: 0 sk_lock-AF_RXRPC &rx->recvmsg_lock irq_context: 0 &call->user_mutex &list->lock#21 irq_context: 0 &sb->s_type->i_mutex_key#10 (rxrpc_call_limiter).lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rx->recvmsg_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rx->call_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rxnet->call_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (&call->timer) irq_context: 0 &sb->s_type->i_mutex_key#10 &list->lock#21 irq_context: 0 &local->client_bundles_lock irq_context: 0 (&conn->timer) irq_context: 0 (work_completion)(&conn->processor) irq_context: 0 &list->lock#43 irq_context: 0 &rxnet->peer_hash_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx stock_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx stack_depot_init_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->iflist_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &xa->xa_lock#3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx net_rwsem irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx net_rwsem &list->lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &tn->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &x->wait#9 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &k->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx gdp_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx gdp_mutex &k->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock irq_context: 0 cb_lock rtnl_mutex 
&rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx bus_type_sem irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx sysfs_symlink_target_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sem->wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &dev->power.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx dpm_list_mtx irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &n->list_lock &c->lock irq_context: 0 &call->user_mutex &c->lock irq_context: 0 &call->user_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &call->waitq &p->pi_lock irq_context: 0 &call->waitq &p->pi_lock &rq->__lock irq_context: 0 &call->waitq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock &obj_hash[i].lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rq->__lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &k->k_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx subsys mutex#17 &k->k_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &dir->lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx dev_base_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx input_pool.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex 
&rdev->wiphy.mtx batched_entropy_u32.lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &tbl->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx sysctl_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx failover_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx proc_inum_ida.xa_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx proc_subdir_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &pnettable->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx smc_ib_devices.mutex irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &ndev->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rlock-AF_NETLINK irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh noop_qdisc.q.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &lock->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock &____s->seqcount 
irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex &____s->seqcount#2 irq_context: 0 
&rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex &____s->seqcount irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem link_ops_rwsem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex deferred_probe_mutex &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx pcpu_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx krc.lock irq_context: 0 &rxnet->call_lock irq_context: 0 &list->lock#21 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) &n->lock &____s->seqcount#2 irq_context: softirq &(&hdev->cmd_timer)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) &n->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem &net->ipv6.ip6addrlbl_table.lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu quarantine_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu &c->lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu &n->list_lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_RXRPC &rxnet->local_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex.wait_lock irq_context: 0 lock link_idr_lock &____s->seqcount#2 irq_context: 0 lock link_idr_lock &pcp->lock &zone->lock irq_context: 0 lock link_idr_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 lock link_idr_lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rtnl_mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim 
irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rtnl_mutex pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex (console_sem).lock irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &rq->__lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 unix_gc_lock irq_context: 0 &u->iolock unix_gc_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &____s->seqcount irq_context: 0 &pipe->mutex/1 rtnl_mutex stock_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &pipe->mutex/1 rtnl_mutex stack_depot_init_mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex crngs.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex pcpu_alloc_mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex batched_entropy_u32.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &xa->xa_lock#3 irq_context: 0 &pipe->mutex/1 rtnl_mutex net_rwsem irq_context: 0 &pipe->mutex/1 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex &tn->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &x->wait#9 irq_context: 0 &pipe->mutex/1 rtnl_mutex &k->list_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex gdp_mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex lock irq_context: 0 &pipe->mutex/1 rtnl_mutex lock kernfs_idr_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &pipe->mutex/1 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &pipe->mutex/1 rtnl_mutex bus_type_sem irq_context: 0 &pipe->mutex/1 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 kfence_freelist_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &root->kernfs_rwsem irq_context: 0 &pipe->mutex/1 rtnl_mutex &dev->power.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex dpm_list_mtx irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 &pipe->mutex/1 
rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex subsys mutex#17 irq_context: 0 &pipe->mutex/1 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &dir->lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex dev_hotplug_mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex dev_base_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex input_pool.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rtnl_mutex &tbl->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex sysctl_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex nl_table_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex nl_table_wait.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex failover_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &pipe->mutex/1 rtnl_mutex proc_subdir_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex proc_subdir_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex &pnettable->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex smc_ib_devices.mutex irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex.wait_lock irq_context: 0 &pipe->mutex/1 rlock-AF_NETLINK irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 purge_vmap_area_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 krc.lock &obj_hash[i].lock irq_context: 0 krc.lock &base->lock 
irq_context: 0 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 key#24 irq_context: 0 cb_lock genl_mutex &sdata->sec_mtx irq_context: 0 cb_lock genl_mutex &sdata->sec_mtx fs_reclaim irq_context: 0 cb_lock genl_mutex &sdata->sec_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex &sdata->sec_mtx pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 sk_lock-AF_TIPC &zone->lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC tk_core.seq.seqcount irq_context: 0 sk_lock-AF_TIPC &list->lock#5 irq_context: softirq rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &ei->socket.wq.wait irq_context: 0 epnested_mutex &ep->mtx/1 irq_context: 0 epnested_mutex &ep->mtx stock_lock irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &f->f_lock irq_context: 0 epnested_mutex &ep->mtx wakeup_ida.xa_lock irq_context: 0 epnested_mutex &ep->mtx &x->wait#9 irq_context: 0 epnested_mutex &ep->mtx 
&obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx &k->list_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &k->list_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex fs_reclaim irq_context: 0 epnested_mutex &ep->mtx gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx gdp_mutex lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex lock kernfs_idr_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &rq->__lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &root->kernfs_rwsem irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &sem->wait_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &p->pi_lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx lock irq_context: 0 epnested_mutex &ep->mtx lock kernfs_idr_lock irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 epnested_mutex &ep->mtx bus_type_sem irq_context: 0 epnested_mutex &ep->mtx sysfs_symlink_target_lock irq_context: 0 epnested_mutex &ep->mtx &____s->seqcount#2 irq_context: 0 epnested_mutex &ep->mtx &n->list_lock irq_context: 0 epnested_mutex &ep->mtx &n->list_lock &c->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex fs_reclaim irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &c->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex nl_table_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex nl_table_wait.lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx subsys mutex#15 irq_context: 0 epnested_mutex &ep->mtx subsys mutex#15 &k->k_lock irq_context: 0 epnested_mutex &ep->mtx events_lock irq_context: 0 epnested_mutex &ep->mtx &dentry->d_lock irq_context: 0 epnested_mutex &ep->mtx &sem->wait_lock irq_context: 0 epnested_mutex &ep->mtx &p->pi_lock irq_context: 0 epnested_mutex &ep->mtx &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx &rq->__lock irq_context: 0 epnested_mutex &ep->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx 
&ep->poll_wait irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &ep->lock irq_context: 0 &ep->mtx remove_cache_srcu &rq->__lock irq_context: 0 &ep->mtx remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &____s->seqcount#2 irq_context: 0 sb_writers#8 &____s->seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock rcu_read_lock &ws->lock irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &ep->mtx rcu_read_lock &ep->poll_wait irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex netpoll_srcu irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pn->hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sch->q.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex __ip_vs_mutex irq_context: 0 vlan_ioctl_mutex 
rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &im->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex class irq_context: 0 vlan_ioctl_mutex rtnl_mutex (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &ht->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &ht->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &(&flowtable->gc_work)->timer irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_add irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &wq->mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &wq->mutex &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock &wq->mutex &x->wait#10 irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_del irq_context: 0 vlan_ioctl_mutex rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock 
&obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &tbl->lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ifa->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex 
rtnl_mutex &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_read_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 vlan_ioctl_mutex 
rtnl_mutex cpu_hotplug_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 vlan_ioctl_mutex rtnl_mutex bpf_devs_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem fib_info_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 
vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex sysctl_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ul->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &net->xdp.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mirred_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &nft_net->commit_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex &ent->pde_unload_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_report_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->pndevs.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &pnn->routes.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex target_list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex kernfs_idr_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 vlan_ioctl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 vlan_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex deferred_probe_mutex irq_context: 0 vlan_ioctl_mutex rtnl_mutex device_links_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock 
irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex dev_base_lock irq_context: 0 vlan_ioctl_mutex lweventlist_lock irq_context: 0 vlan_ioctl_mutex pcpu_lock irq_context: 0 vlan_ioctl_mutex pool_lock#2 irq_context: 0 vlan_ioctl_mutex &dir->lock#2 irq_context: 0 vlan_ioctl_mutex &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex krc.lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 vlan_ioctl_mutex netdev_unregistering_wq.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&timer) rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: softirq (&peer->timer_send_keepalive) init_task.mems_allowed_seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock 
&cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (&timer.timer) irq_context: softirq (&peer->timer_send_keepalive) &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rcu_state.expedited_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#12 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); 
(typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem __ip_vs_app_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem uevent_sock_mutex.wait_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 epnested_mutex &ep->mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#19 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &c->lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 epnested_mutex &ep->mtx gdp_mutex &c->lock irq_context: 0 sb_writers#8 kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 kn->active#17 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override pool_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit 
irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_node_0 irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)phy16 irq_context: 0 (wq_completion)phy16 (work_completion)(&local->reconfig_filter) irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6 k-slock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)phy16 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sb_writers#5 rcu_node_0 irq_context: 0 sb_writers#5 &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 
sb_writers#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq hrtimer_bases.lock fill_pool_map-wait-type-override init_task.mems_allowed_seq.seqcount irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 tomoyo_ss rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)phy17 irq_context: 0 (wq_completion)phy17 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy17 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#3 remove_cache_srcu irq_context: 0 sb_writers#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#3 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#3 remove_cache_srcu &c->lock irq_context: 0 sb_writers#3 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#3 remove_cache_srcu &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock kfence_freelist_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 pernet_ops_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)phy18 irq_context: 0 (wq_completion)phy18 
(work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy18 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock pgd_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock key irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET init_mm.page_table_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pgd_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex key irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex percpu_counters_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock pcpu_lock stock_lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_TUNNEL6#2 rcu_read_lock rlock-AF_PACKET irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg2#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock 
&ep->wq &p->pi_lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 epnested_mutex &ep->mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) 
*)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex uevent_sock_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#19 &rq->__lock irq_context: 0 &p->lock &of->mutex kn->active#19 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sem->wait_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock genl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock irq_context: 0 cb_lock genl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &rq->__lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 
&kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rfkill_global_mutex bus_type_sem &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] &n->list_lock irq_context: 0 kn->active#16 &kernfs_locks->open_file_mutex[count] &n->list_lock &c->lock irq_context: 0 kn->active#19 &n->list_lock irq_context: 0 kn->active#19 &n->list_lock &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &____s->seqcount#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &____s->seqcount irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &h->lhash2[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &h->lhash2[i].lock clock-AF_INET6 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 cb_lock genl_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#4 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 kn->active#18 &rq->__lock irq_context: 0 kn->active#18 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rlock-AF_NETLINK irq_context: 0 kn->active#17 &____s->seqcount#2 irq_context: 0 kn->active#17 &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx gdp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex rfkill_global_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#19 &____s->seqcount#2 irq_context: 0 kn->active#19 &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &rdev->wiphy.mtx remove_cache_srcu quarantine_lock irq_context: 0 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex deferred_probe_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &meta->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback kfence_freelist_lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)phy19 irq_context: 0 (wq_completion)phy19 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy19 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &u->iolock &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 &u->iolock 
&mm->mmap_lock ptlock_ptr(page)#2 lock#4 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &type->s_umount_key#23/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy20 irq_context: 0 (wq_completion)phy20 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy20 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &rdev->wiphy.mtx &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_UNIX reuseport_lock irq_context: 0 sk_lock-AF_CAN clock-AF_CAN irq_context: 0 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 sk_lock-AF_CAN proc_inum_ida.xa_lock irq_context: 0 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN proc_subdir_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &ent->pde_unload_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN proc_inum_ida.xa_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 sb_writers#3 tomoyo_ss &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) purge_vmap_area_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)phy21 irq_context: 0 (wq_completion)phy21 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy21 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : 
"0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)phy22 irq_context: 0 (wq_completion)phy22 &rq->__lock irq_context: 0 (wq_completion)phy22 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy22 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)phy23 irq_context: 0 (wq_completion)phy23 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy23 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock &rq->__lock irq_context: 0 sb_writers &type->i_mutex_dir_key#2 rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &group->mark_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &bat_priv->softif_vlan_list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pgd_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex key irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex percpu_counters_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex pcpu_lock stock_lock irq_context: 0 (wq_completion)phy24 irq_context: 0 (wq_completion)phy24 (work_completion)(&local->reconfig_filter) irq_context: 0 sk_lock-AF_KEY irq_context: 0 sk_lock-AF_KEY slock-AF_KEY irq_context: 0 slock-AF_KEY irq_context: 0 rtnl_mutex &bond->mode_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rlock-AF_KEY irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &net->xfrm.xfrm_policy_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &n->list_lock irq_context: 0 rtnl_mutex &bat_priv->tvlv.handler_list_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &bat_priv->tt.changes_list_lock irq_context: 0 rtnl_mutex rcu_read_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock key#16 irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 (wq_completion)phy24 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: 0 nfnl_subsys_ctnetlink pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &nf_nat_locks[i] irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 &type->i_mutex_dir_key#4 &root->kernfs_rwsem fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#3 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_node_0 irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mii_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->arp_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->alb_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->ad_work)->work) irq_context: 0 pcpu_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem &xt[i].mutex &lock->wait_lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &xt[i].mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex (work_completion)(&(&bond->slave_arr_work)->work) irq_context: 0 rtnl_mutex 
&dev_addr_list_lock_key/1 &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &bond->mode_lock &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock (console_sem).lock irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &bond->mode_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock (console_sem).lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 rtnl_mutex &dev->tx_global_lock &batadv_netdev_xmit_lock_key irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh key#16 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &bat_priv->tt.changes_list_lock irq_context: 0 sb_writers#4 sb_internal rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &ht->mutex &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &wq->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy25 irq_context: 0 (wq_completion)phy25 (work_completion)(&local->reconfig_filter) irq_context: 0 (wq_completion)phy25 (work_completion)(&local->reconfig_filter) &local->filter_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &cfs_rq->removed.lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &c->lock irq_context: 0 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &c->lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &rxe->usdev_lock &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) &meta->lock irq_context: softirq (&n->timer) kfence_freelist_lock irq_context: 0 &type->i_mutex_dir_key#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_base_lock &xa->xa_lock#3 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex dev_base_lock &xa->xa_lock#3 pool_lock#2 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &n->list_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex net_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock &n->list_lock &c->lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 remove_cache_srcu rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &p->alloc_lock &x->wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &c->lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &n->list_lock &c->lock irq_context: softirq &(&bond->slave_arr_work)->timer irq_context: softirq &(&bond->slave_arr_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->slave_arr_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->slave_arr_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bond->slave_arr_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->slave_arr_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 kfence_freelist_lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq &(&bond->mii_work)->timer irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bond->ad_work)->timer irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->ad_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 tomoyo_ss mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 remove_cache_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&barr->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &nft_net->commit_mutex (work_completion)(&(&priv->gc_work)->work) irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &meta->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 
(wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (gc_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pgd_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem key irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pcpu_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem percpu_counters_lock irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock &meta->lock irq_context: 0 rtnl_mutex &bat_priv->tt.changes_list_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: softirq (&hsr->announce_timer) rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: softirq 
(&hsr->announce_timer) rcu_read_lock kfence_freelist_lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock &meta->lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 tasklist_lock &sighand->siglock &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events fqdir_free_work quarantine_lock irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[0] irq_context: 0 (wq_completion)events free_ipc_work &pool->lock irq_context: 0 (wq_completion)events free_ipc_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &____s->seqcount#2 irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 tomoyo_ss mount_lock irq_context: 0 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 sk_lock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 slock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF elock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &this->info_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF clock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF elock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_CAIF irq_context: 0 &sb->s_type->i_mutex_key#10 elock-AF_CAIF irq_context: 0 sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 sk_lock-AF_VSOCK vsock_table_lock batched_entropy_u32.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: softirq (&peer->timer_new_handshake) irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock &obj_hash[i].lock irq_context: 0 sb_writers#3 tomoyo_ss &____s->seqcount#2 irq_context: softirq (&peer->timer_new_handshake) &peer->endpoint_lock pool_lock#2 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_new_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 pernet_ops_rwsem batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &info->lock key#9 irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &batadv_netdev_addr_lock_key irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &sdata->lock irq_context: 0 cb_lock &sdata->lock &rq->__lock irq_context: 0 cb_lock &sdata->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex &bond->mode_lock &n->list_lock irq_context: 0 rtnl_mutex &bond->mode_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 cb_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock 
rcu_state.exp_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 cb_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 cb_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &dir->lock &meta->lock irq_context: 0 &dir->lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &batadv_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &batadv_netdev_addr_lock_key pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &batadv_netdev_addr_lock_key krc.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock lweventlist_lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock lweventlist_lock &c->lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock lweventlist_lock &dir->lock#2 irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock &base->lock irq_context: 0 rtnl_mutex &bond->mode_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex krc.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &rnp->exp_wq[1] irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#4 &obj_hash[i].lock pool_lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 sb_writers#14 &rq->__lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 &resv_map->lock irq_context: 0 &mm->mmap_lock &sb->s_type->i_mutex_key#21 hugetlb_lock irq_context: 0 namespace_sem rcu_read_lock rcu_node_0 irq_context: 0 namespace_sem rcu_read_lock &rq->__lock irq_context: 0 namespace_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 uevent_sock_mutex fs_reclaim irq_context: 0 uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 namespace_sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 
namespace_sem fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &bond->ipsec_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &____s->seqcount#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex quarantine_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_node_0 irq_context: softirq &(&bat_priv->dat.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rcu_state.exp_mutex pool_lock#2 irq_context: 0 
(wq_completion)events free_ipc_work rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)events free_ipc_work sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pgd_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex key irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pcpu_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex percpu_counters_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rcu_state.expedited_wq irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#8 &of->mutex kn->active#5 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &ACCESS_PRIVATE(ssp->srcu_sup, lock) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond5#2 irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex kfence_freelist_lock irq_context: 0 rtnl_mutex rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock pgd_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock key irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock pcpu_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock percpu_counters_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 lock pidmap_lock batched_entropy_u8.lock irq_context: 0 lock pidmap_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock cpu_hotplug_lock irq_context: 0 cb_lock cpu_hotplug_lock jump_label_mutex irq_context: 0 cb_lock cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 cb_lock cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 pcpu_lock stock_lock irq_context: 0 (wq_completion)bond6#2 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &base->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[3] irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rnp->exp_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 
0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &sta->rate_ctrl_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond7#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &sctp_ep_hashtable[i].lock irq_context: 0 sk_lock-AF_INET6 sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq irq_context: 0 hashlimit_mutex irq_context: 0 hashlimit_mutex fs_reclaim irq_context: 0 hashlimit_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 hashlimit_mutex pool_lock#2 irq_context: 0 hashlimit_mutex free_vmap_area_lock irq_context: 0 hashlimit_mutex vmap_area_lock irq_context: 0 hashlimit_mutex &____s->seqcount irq_context: 0 hashlimit_mutex init_mm.page_table_lock irq_context: 0 hashlimit_mutex &rq->__lock irq_context: 0 hashlimit_mutex proc_subdir_lock irq_context: 0 hashlimit_mutex proc_inum_ida.xa_lock irq_context: 0 hashlimit_mutex proc_subdir_lock irq_context: 0 hashlimit_mutex &obj_hash[i].lock irq_context: 0 hashlimit_mutex &base->lock irq_context: 0 hashlimit_mutex &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 hashlimit_mutex &ent->pde_unload_lock irq_context: 0 (work_completion)(&(&hinfo->gc_work)->work) irq_context: 0 &hinfo->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 delayed_uprobe_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 
(work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &br->lock lweventlist_lock &c->lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &hdev->req_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 &hdev->req_lock &wq->mutex irq_context: 0 &hdev->req_lock (wq_completion)hci0#2 irq_context: 0 &hdev->req_lock &wq->mutex &pool->lock/1 irq_context: 0 &hdev->req_lock &wq->mutex &x->wait#10 irq_context: 0 &hdev->req_lock &hdev->lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &list->lock#10 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock (work_completion)(&(&conn->id_addr_timer)->work) irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &conn->chan_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock &list->lock#9 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock (work_completion)(&(&conn->info_timer)->work) irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_read_lock &pool->lock irq_context: 0 &hdev->req_lock &hdev->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock (work_completion)(&(&conn->disc_work)->work) irq_context: 0 &hdev->req_lock &hdev->lock rcu_read_lock &pool->lock/1 irq_context: 0 &hdev->req_lock &hdev->lock (work_completion)(&(&conn->auto_accept_work)->work) irq_context: 0 &hdev->req_lock &hdev->lock (work_completion)(&(&conn->idle_work)->work) irq_context: 0 &hdev->req_lock &hdev->lock &list->lock#9 irq_context: 0 &hdev->req_lock &hdev->lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rnp->exp_wq[2] 
irq_context: 0 &hdev->req_lock &hdev->lock &k->k_lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 &hdev->req_lock &hdev->lock dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock kernfs_idr_lock irq_context: 0 &hdev->req_lock &hdev->lock pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock &k->k_lock klist_remove_lock irq_context: 0 &hdev->req_lock &hdev->lock &k->list_lock irq_context: 0 &hdev->req_lock &hdev->lock sysfs_symlink_target_lock irq_context: 0 &hdev->req_lock &hdev->lock subsys mutex#81 irq_context: 0 &hdev->req_lock &hdev->lock subsys mutex#81 &k->k_lock irq_context: 0 &hdev->req_lock &hdev->lock subsys mutex#81 &k->k_lock klist_remove_lock irq_context: 0 &hdev->req_lock &hdev->lock &x->wait#9 irq_context: 0 &hdev->req_lock &hdev->lock dpm_list_mtx irq_context: 0 &hdev->req_lock &hdev->lock &dev->power.lock irq_context: 0 &hdev->req_lock &hdev->lock deferred_probe_mutex irq_context: 0 &hdev->req_lock &hdev->lock device_links_lock irq_context: 0 &hdev->req_lock &hdev->lock mmu_notifier_invalidate_range_start irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex pool_lock#2 irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex nl_table_lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex &c->lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex nl_table_wait.lock irq_context: 0 &hdev->req_lock &hdev->lock uevent_sock_mutex &obj_hash[i].lock irq_context: 0 &hdev->req_lock &hdev->lock &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &hdev->req_lock &hdev->lock hci_cb_list_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &hdev->req_lock &hdev->lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &hdev->req_lock 
&list->lock#6 irq_context: 0 &hdev->req_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sig->cred_guard_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rlock-AF_PPPOX irq_context: 0 &sb->s_type->i_mutex_key#10 wlock-AF_PPPOX irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rnp->exp_wq[2] irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->defense_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &meta->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock pgd_lock irq_context: 0 &hdev->req_lock rcu_read_lock pool_lock#2 irq_context: 0 &hdev->req_lock key irq_context: 0 &hdev->req_lock pcpu_lock irq_context: 0 &hdev->req_lock percpu_counters_lock irq_context: 0 &hdev->req_lock &____s->seqcount irq_context: 0 &hdev->req_lock (console_sem).lock irq_context: 0 &hdev->req_lock console_lock console_srcu console_owner_lock irq_context: 0 &hdev->req_lock console_lock console_srcu console_owner irq_context: 0 &hdev->req_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &hdev->req_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 sb_internal pgd_lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 sb_internal key irq_context: 0 sb_writers#4 sb_internal pcpu_lock irq_context: 0 sb_writers#4 sb_internal percpu_counters_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock kfence_freelist_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1 
(work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void 
*__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#4 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &cfs_rq->removed.lock irq_context: 0 &hdev->req_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &hdev->req_lock rcu_state.exp_mutex.wait_lock irq_context: 0 &hdev->req_lock &p->pi_lock irq_context: 0 &hdev->req_lock &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock rcu_node_0 irq_context: 0 &hdev->req_lock &rcu_state.expedited_wq irq_context: 0 &hdev->req_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &hdev->req_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &hdev->req_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex &ei->xattr_sem &rq->__lock irq_context: 0 &iint->mutex &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do 
{ const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem &sem->wait_lock irq_context: 0 &ei->i_data_sem &rq->__lock irq_context: 0 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sem->wait_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: softirq slock-AF_INET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &lg->lg_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &vma->vm_lock->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5/1 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: softirq (&n->timer) icmp_global.lock batched_entropy_u8.lock crngs.lock irq_context: 0 cb_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 &hdev->req_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 crypto_default_null_skcipher_lock irq_context: 0 crypto_default_null_skcipher_lock &rq->__lock irq_context: 0 crypto_default_null_skcipher_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 crypto_default_null_skcipher_lock crypto_alg_sem irq_context: 0 crypto_default_null_skcipher_lock fs_reclaim irq_context: 0 crypto_default_null_skcipher_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 crypto_default_null_skcipher_lock pool_lock#2 irq_context: 0 namespace_sem pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &pnettable->lock irq_context: 0 cb_lock genl_mutex &pnettable->lock &c->lock irq_context: 0 cb_lock genl_mutex &pnettable->lock &n->list_lock irq_context: 0 cb_lock genl_mutex &pnettable->lock &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex &pnettable->lock &rq->__lock irq_context: 0 cb_lock genl_mutex &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex &pnettable->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &pnettable->lock &dir->lock#2 irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 crypto_default_null_skcipher_lock irq_context: 0 &sb->s_type->i_mutex_key#10 crypto_default_null_skcipher_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 crypto_default_null_skcipher_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &hdev->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &hdev->lock fs_reclaim irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &hdev->lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &hdev->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 key#14 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem 
devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock lock#5 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ret->b_state_lock &journal->j_list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 key#14 irq_context: 0 &hdev->req_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kfence_freelist_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag6_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override 
&n->list_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &c->lock irq_context: softirq rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu quarantine_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &c->lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &n->list_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 &nft_net->commit_mutex &obj_hash[i].lock pool_lock irq_context: 0 &nft_net->commit_mutex nf_ct_proto_mutex irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 &xt[i].mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 namespace_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex &____s->seqcount#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock batched_entropy_u8.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock kfence_freelist_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#7 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock (console_sem).lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_nat_locks[i] irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 
&pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock kfence_freelist_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &meta->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex lock kernfs_idr_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->bla.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 
&anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->monitor_work)->work) rcu_callback &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 
(wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) rcu_node_0 irq_context: 0 &type->i_mutex_dir_key#5 remove_cache_srcu pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock irq_context: 0 &sig->cred_guard_mutex &sig->exec_update_lock &mm->mmap_lock ptlock_ptr(page)#2 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[1] &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock kfence_freelist_lock 
irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock &meta->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &root->kernfs_rwsem rcu_node_0 irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock &vma->vm_lock->lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override 
&____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 sk_lock-AF_PACKET &rnp->exp_wq[1] irq_context: 0 sk_lock-AF_PACKET &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &lock->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&p->wq) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_sysfs_mtx.wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx.wait_lock irq_context: softirq rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: softirq (&pool->idle_timer) irq_context: softirq (&pool->idle_timer) &pool->lock/1 irq_context: softirq (&pool->idle_timer) &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem &net->sctp.addr_wq_lock k-slock-AF_INET6/1 &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_INET6/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events free_ipc_work &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock &rnp->exp_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex irq_context: 0 pernet_ops_rwsem rtnl_mutex &caifn->caifdevs.lock rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &base->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &cfs_rq->removed.lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (t) irq_context: softirq (t) &obj_hash[i].lock irq_context: softirq (t) &base->lock irq_context: softirq (t) &base->lock &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock irq_context: softirq (&pool->idle_timer) &pool->lock &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) &pool->lock &base->lock irq_context: softirq (&pool->idle_timer) &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &rnp->exp_wq[3] irq_context: 0 pernet_ops_rwsem rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &rnp->exp_wq[1] irq_context: 0 (wq_completion)events free_ipc_work &rnp->exp_wq[1] irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#4 &____s->seqcount irq_context: 0 (wq_completion)events free_ipc_work &xa->xa_lock#4 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#8 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#8 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events_long &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &n->list_lock irq_context: 0 pernet_ops_rwsem lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rfcomm_mutex irq_context: 0 namespace_sem namespace_sem.wait_lock irq_context: 0 namespace_sem.wait_lock irq_context: 0 &xt[i].mutex remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 rlock-AF_INET irq_context: 0 sk_lock-AF_INET &dir->lock#2 irq_context: 0 sk_lock-AF_INET &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rlock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock 
&rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: softirq &(&bat_priv->mcast.work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &service->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex defrag4_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_TIPC &tn->nametbl_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq security/integrity/ima/ima_queue_keys.c:35 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work ima_keys_lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work &obj_hash[i].lock irq_context: 0 (wq_completion)events (ima_keys_delayed_work).work pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_sysfs_mtx.wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx.wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex kfence_freelist_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex &meta->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)events free_ipc_work quarantine_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock batched_entropy_u8.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &meta->lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock &n->list_lock irq_context: 0 &nft_net->commit_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_node_0 irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#4 irq_context: 0 pernet_ops_rwsem ipvs->est_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 pernet_ops_rwsem ipvs->est_mutex &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &____s->seqcount irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock stock_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock lock#4 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &c->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &ei->i_es_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &dd->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &dd->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock 
rcu_read_lock &virtscsi_vq->vq_lock irq_context: 0 sb_writers#4 &iint->mutex &folio_wait_table[i] irq_context: 0 sb_writers#4 &iint->mutex tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 mount_lock irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 tk_core.seq.seqcount irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 &journal->j_state_lock irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle &ei->i_raw_lock irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 jbd2_handle &journal->j_wait_updates irq_context: 0 sb_writers#4 &iint->mutex sb_writers#4 &obj_hash[i].lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &n->list_lock &c->lock irq_context: softirq (&lapb->t1timer) &lapb->lock rcu_read_lock_bh quarantine_lock irq_context: 0 sb_writers#4 &iint->mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &iint->mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &ei->i_es_lock key#2 irq_context: 0 sb_writers#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &sb->s_type->i_lock_key#23 &dentry->d_lock irq_context: 0 rtnl_mutex rename_lock.seqcount irq_context: 0 rtnl_mutex &dentry->d_lock &sb->s_type->i_lock_key#23 &dentry->d_lock &lru->node[i].lock irq_context: 0 rtnl_mutex &dentry->d_lock &lru->node[i].lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &c->lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET 
quarantine_lock irq_context: softirq (&q->adapt_timer)#2 irq_context: softirq (&q->adapt_timer)#2 rcu_read_lock &sch->q.lock irq_context: softirq (&q->adapt_timer)#2 rcu_read_lock &sch->q.lock &obj_hash[i].lock irq_context: softirq (&q->adapt_timer)#2 rcu_read_lock &sch->q.lock &base->lock irq_context: softirq (&q->adapt_timer)#2 rcu_read_lock &sch->q.lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &iint->mutex &____s->seqcount#2 irq_context: 0 sb_writers#4 &iint->mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &pcp->lock &zone->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 rtnl_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem pool_lock#2 irq_context: 0 (wq_completion)events free_ipc_work &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#5 rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem 
&device->compat_devs_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem &device->compat_devs_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag4_mutex nf_hook_mutex &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq &(&hwstats->traffic_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rdma_nets_rwsem.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex bpf_devs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &sem->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &lock->wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &device->compat_devs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &lock->wait_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx dev_pm_qos_sysfs_mtx.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem dev_pm_qos_sysfs_mtx.wait_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->tt.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock &n->lock &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events fqdir_free_work &base->lock irq_context: 0 (wq_completion)events fqdir_free_work &base->lock &obj_hash[i].lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events free_ipc_work &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &n->list_lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &n->list_lock &c->lock irq_context: 0 lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 batched_entropy_u8.lock crngs.lock irq_context: 0 &xt[i].mutex quarantine_lock irq_context: 0 rtnl_mutex &app->lock#2 pool_lock#2 irq_context: 0 rtnl_mutex &app->lock pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock &list->lock#11 irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex &im->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 rtnl_mutex &tbl->lock &____s->seqcount irq_context: 0 rtnl_mutex &tbl->lock rcu_read_lock pool_lock#2 irq_context: softirq (&app->join_timer) &app->lock &c->lock irq_context: softirq rcu_callback &c->lock irq_context: softirq (&app->join_timer)#2 &app->lock#2 pool_lock#2 irq_context: softirq (&app->join_timer)#2 &app->lock#2 &list->lock#12 irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&app->join_timer)#2 rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock &sap->sk_lock irq_context: 0 rtnl_mutex &im->lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&app->join_timer)#2 &app->lock#2 &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &mapping->private_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle bit_wait_table + i irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &in_dev->mc_tomb_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex &sch->q.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &sch->q.lock tk_core.seq.seqcount irq_context: 0 rtnl_mutex &sch->q.lock &q->current_entry_lock irq_context: 0 rtnl_mutex &sch->q.lock &q->current_entry_lock hrtimer_bases.lock irq_context: 0 rtnl_mutex &sch->q.lock &q->current_entry_lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &iint->mutex rcu_node_0 irq_context: 0 sb_writers#4 &iint->mutex 
mapping.invalidate_lock &n->list_lock irq_context: 0 sb_writers#4 &iint->mutex mapping.invalidate_lock &n->list_lock &c->lock irq_context: hardirq &q->current_entry_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &n->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &n->list_lock &c->lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 rcu_read_lock (console_sem).lock irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink (console_sem).lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &iint->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &c->lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &n->list_lock irq_context: 0 &pipe->mutex/1 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 &n->list_lock irq_context: 0 &pipe->mutex/1 &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &pipe->mutex/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 remove_cache_srcu irq_context: 0 &pipe->mutex/1 remove_cache_srcu quarantine_lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &c->lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &n->list_lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &pipe->mutex/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &rq->__lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock netlbl_domhsh_lock irq_context: 0 &disk->open_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&app->join_timer) &app->lock &n->list_lock irq_context: softirq (&app->join_timer) &app->lock &n->list_lock &c->lock irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 nlk_cb_mutex-GENERIC rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock 
irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 sk_lock-AF_INET remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK fs_reclaim irq_context: 0 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 sk_lock-AF_VSOCK &list->lock#44 irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_VSOCK &ei->socket.wq.wait irq_context: 0 sk_lock-AF_VSOCK &rq->__lock irq_context: 0 sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) &list->lock#44 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) vsock_table_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) 
sk_lock-AF_VSOCK fs_reclaim irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &dir->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 pool_lock#2 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->tx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 vsock_table_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->rx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &list->lock#44 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) slock-AF_VSOCK irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &rq->__lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &rq->__lock &cfs_rq->removed.lock irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &vvs->tx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock 
&rq->__lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK vsock_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 slock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &vvs->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &list->lock#44 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &list->lock#44 irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 clock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK sk_lock-AF_VSOCK/1 rlock-AF_VSOCK irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK slock-AF_VSOCK &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) 
sk_lock-AF_VSOCK &base->lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) sk_lock-AF_VSOCK &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &dir->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &vvs->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK &list->lock#44 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_VSOCK rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) &obj_hash[i].lock irq_context: 0 (wq_completion)vsock-loopback (work_completion)(&vsock->pkt_work) pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle rcu_node_0 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_PACKET &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock irq_context: 0 sk_lock-AF_PACKET &po->pg_vec_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->bind_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->pg_vec_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &po->pg_vec_lock rlock-AF_PACKET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 slock-AF_ALG &sk->sk_lock.wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 &rq->__lock irq_context: 0 sk_lock-AF_INET6 &newf->file_lock irq_context: 0 sk_lock-AF_INET6 &sb->s_type->i_lock_key#8 &dentry->d_lock irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &ep->mtx fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &meta->lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock pgd_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock stock_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock key irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock pcpu_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &rdma_nl_types[idx].sem (console_sem).lock irq_context: 0 &rdma_nl_types[idx].sem console_lock console_srcu console_owner_lock irq_context: 0 &rdma_nl_types[idx].sem console_lock console_srcu console_owner irq_context: 0 &rdma_nl_types[idx].sem console_lock console_srcu console_owner &port_lock_key irq_context: 0 &rdma_nl_types[idx].sem console_lock console_srcu console_owner console_owner_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &hashinfo->ehash_locks[i] irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex 
sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG rlock-AF_NETLINK irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &obj_hash[i].lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 pool_lock#2 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#3 &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &nsim_trap_data->trap_lock crngs.lock base_crng.lock irq_context: 0 tomoyo_ss batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) &base->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &base->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond1 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: softirq &(&bond->alb_work)->timer irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock_bh pool_lock#2 irq_context: softirq &(&bond->mcast_work)->timer irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &bond->mode_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &bond->mode_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 cb_lock genl_mutex &pnettable->lock (console_sem).lock irq_context: 0 cb_lock genl_mutex &pnettable->lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock genl_mutex &pnettable->lock console_lock console_srcu console_owner irq_context: 0 cb_lock genl_mutex &pnettable->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock genl_mutex &pnettable->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock genl_mutex &pnettable->lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 sk_lock-AF_TIPC stock_lock irq_context: 0 sk_lock-AF_TIPC &dir->lock irq_context: 0 sk_lock-AF_TIPC batched_entropy_u32.lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 &base->lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 fs_reclaim irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 pool_lock#2 irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 &list->lock#23 irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC &list->lock#23 irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC &base->lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 slock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 &rq->__lock irq_context: 0 sk_lock-AF_TIPC sk_lock-AF_TIPC/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle remove_cache_srcu pool_lock#2 irq_context: 0 &pipe->mutex/1 &u->iolock &u->lock irq_context: 0 &pipe->mutex/1 &u->lock irq_context: 0 &pipe->mutex/1 &u->lock &ei->socket.wq.wait irq_context: 0 sock_diag_mutex &n->list_lock irq_context: 0 sock_diag_mutex &n->list_lock &c->lock irq_context: 0 sock_diag_mutex &rq->__lock irq_context: 0 sock_diag_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sock_diag_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sock_diag_mutex 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sock_diag_mutex rcu_read_lock rcu_node_0 irq_context: 0 sock_diag_mutex rcu_read_lock &rq->__lock irq_context: 0 sock_diag_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rcu_state.barrier_mutex pool_lock#2 irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_alg_sem irq_context: 0 rtnl_mutex uevent_sock_mutex rcu_node_0 irq_context: 0 rtnl_mutex &wg->device_update_lock stock_lock irq_context: 0 rtnl_mutex &wg->device_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 &pipe->mutex/1 &u->iolock &____s->seqcount irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_NONE#2 irq_context: 0 rtnl_mutex &r->consumer_lock#2 irq_context: 0 rtnl_mutex &wg->socket_update_lock irq_context: 0 rtnl_mutex &table->hash[i].lock irq_context: 0 rtnl_mutex &table->hash[i].lock &table->hash2[i].lock irq_context: 0 rtnl_mutex k-clock-AF_INET irq_context: 0 rtnl_mutex k-clock-AF_INET6 irq_context: 0 &wg->device_update_lock irq_context: 0 &wg->device_update_lock &wg->socket_update_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock 
&obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &wg->device_update_lock &wq->mutex irq_context: 0 &wg->device_update_lock init_lock irq_context: 0 &wg->device_update_lock &wq->mutex &pool->lock irq_context: 0 &wg->device_update_lock &wq->mutex &x->wait#10 irq_context: 0 &wg->device_update_lock wq_pool_mutex irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock irq_context: 0 &wg->device_update_lock &rq->__lock irq_context: 0 &wg->device_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &cfs_rq->removed.lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex crypto_alg_sem irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem fs_reclaim irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &____s->seqcount#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex (crypto_chain).rwsem &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &wg->device_update_lock pool_lock#2 irq_context: 0 &wg->device_update_lock pcpu_lock irq_context: 0 &wg->device_update_lock &wq->mutex &pool->lock/1 irq_context: 0 &wg->device_update_lock wq_pool_mutex &wq->mutex &pool->lock/1 irq_context: 0 &wg->device_update_lock &pool->lock/1 irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock wq_mayday_lock irq_context: 0 &wg->device_update_lock &p->pi_lock irq_context: 0 &wg->device_update_lock &p->pi_lock &rq->__lock irq_context: 0 &wg->device_update_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock &x->wait irq_context: 0 &wg->device_update_lock &r->consumer_lock#2 irq_context: 0 
&wg->device_update_lock rcu_read_lock pool_lock#2 irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.barrier_mutex &x->wait#24 irq_context: 0 &wg->device_update_lock &zone->lock irq_context: 0 &wg->device_update_lock &zone->lock &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex &x->wait#21 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events xfrm_state_gc_work rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 pool_lock#2 irq_context: 0 rtnl_mutex &p->tcfa_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 &ep->mtx uevent_sock_mutex &n->list_lock irq_context: 0 &ep->mtx uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &cfs_rq->removed.lock irq_context: 0 &ep->mtx rlock-AF_PACKET irq_context: 0 &ep->mtx wlock-AF_PACKET irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &p->tcfa_lock &c->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &n->list_lock &c->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock &sctp_ep_hashtable[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 sk_lock-AF_INET6 sctp_assocs_id_lock &obj_hash[i].lock irq_context: 0 &u->iolock &u->peer_wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &bond->mode_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh &____s->seqcount irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &c->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &p->pi_lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 &dentry->d_lock rcu_read_lock &sb->s_type->i_lock_key#24 &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock &rq->__lock &cfs_rq->removed.lock irq_context: 0 uevent_sock_mutex &n->list_lock irq_context: 0 uevent_sock_mutex &n->list_lock &c->lock irq_context: softirq (&n->timer) quarantine_lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 rds_ib_devices_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rds_ib_devices_lock &pool->flush_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &n->list_lock &c->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex (console_sem).lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&q->timeout) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 &ep->mtx &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 &u->iolock pgd_lock irq_context: 0 &u->iolock key irq_context: 0 &u->iolock pcpu_lock irq_context: 0 &u->iolock percpu_counters_lock irq_context: 0 sk_lock-AF_VSOCK &obj_hash[i].lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock &c->lock irq_context: 0 &ep->mtx gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 &ep->mtx gdp_mutex &rq->__lock irq_context: 0 &ep->mtx gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 purge_vmap_area_lock quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &n->list_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_node_0 irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_node_0 irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA fs_reclaim irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA fs_reclaim mmu_notifier_invalidate_range_start irq_context: 
0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA pool_lock#2 irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA &c->lock irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA &n->list_lock irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA &n->list_lock &c->lock irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA devices_rwsem irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA devices_rwsem &rq->__lock irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &rdma_nl_types[idx].sem nlk_cb_mutex-RDMA rlock-AF_NETLINK irq_context: 0 sock_diag_mutex rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 hidp_session_sem irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock_bh &ifa->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock &ei->i_prealloc_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle 
&ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_es_lock key#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &sem->wait_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &sem->wait_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &pa->pa_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &type->s_umount_key#32 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &c->lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &wb->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem key#14 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem quarantine_lock irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 sb_writers#4 &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&sbi->s_writepages_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &sem->wait_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &p->pi_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &mapping->private_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_commit irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_wait_done_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &mapping->private_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &wb->list_lock 
&sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem &folio_wait_table[i] &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim &rq->__lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &rxnet->local_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: softirq (&app->join_timer) &app->lock &____s->seqcount#2 irq_context: softirq (&app->join_timer) &app->lock &____s->seqcount irq_context: softirq &(&bond->alb_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 batched_entropy_u8.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem 
jbd2_handle &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &journal->j_wait_reserved irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock key#14 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_wait_updates &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 key#3 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 key#14 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &xa->xa_lock#8 stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &xa->xa_lock#8 pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem 
&ei->i_data_sem/1 &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &xa->xa_lock#8 &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &wb->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 lock#4 &lruvec->lru_lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &____s->seqcount#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 quarantine_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &obj_hash[i].lock 
irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle hrtimer_bases.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &(ei->i_block_reservation_lock) key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 pool_lock#2 irq_context: 0 &tsk->futex_exit_mutex pool_lock#2 irq_context: 0 &group->notification_waitq &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 &ei->i_data_sem &ei->i_data_sem/1 &n->list_lock &c->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle &ei->i_data_sem &ei->i_data_sem/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 jbd2_handle rcu_node_0 
irq_context: 0 cb_lock genl_mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex crngs.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex net_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &x->wait#9 irq_context: 0 cb_lock genl_mutex rtnl_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex gdp_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex lock irq_context: 0 cb_lock genl_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu &n->list_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sb->s_type->i_mutex_key#8/4 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex bus_type_sem irq_context: 0 cb_lock genl_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &pcp->lock &zone->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &n->list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex 
uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 cb_lock genl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &dir->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_base_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex input_pool.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &tbl->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex sysctl_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex nl_table_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &pnettable->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex rlock-AF_NETLINK irq_context: 0 cb_lock genl_mutex rtnl_mutex &local->iflist_mtx#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock genl_mutex rtnl_mutex bpf_devs_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&in_dev->mc_tomb_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex sysctl_lock krc.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex class irq_context: 0 cb_lock genl_mutex rtnl_mutex (&tbl->proxy_timer) irq_context: 0 cb_lock genl_mutex rtnl_mutex &ul->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &net->xdp.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex krc.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex mirred_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &nft_net->commit_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &pnn->pndevs.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &pnn->routes.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex target_list_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex _xmit_NONE irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex kernfs_idr_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_hotplug_mutex &k->k_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock genl_mutex rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock genl_mutex rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex deferred_probe_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex device_links_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock genl_mutex rcu_state.barrier_mutex irq_context: 0 cb_lock genl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 cb_lock genl_mutex rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_state.barrier_mutex &x->wait#24 irq_context: 0 cb_lock genl_mutex rcu_state.barrier_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex dev_base_lock irq_context: 0 cb_lock genl_mutex lweventlist_lock irq_context: 0 cb_lock genl_mutex krc.lock irq_context: 0 cb_lock genl_mutex &dir->lock#2 irq_context: 0 cb_lock genl_mutex &dir->lock#2 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex &dir->lock#2 pool_lock#2 irq_context: 0 cb_lock genl_mutex netdev_unregistering_wq.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem ovs_mutex quarantine_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem &fn->fou_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock 
irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 pernet_ops_rwsem rcu_read_lock stock_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pcpu_lock stock_lock irq_context: 0 pernet_ops_rwsem rcu_read_lock pool_lock irq_context: 0 &group->inotify_data.idr_lock &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex &mm->mmap_lock fs_reclaim irq_context: 0 ppp_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 ppp_mutex &mm->mmap_lock stock_lock irq_context: 0 ppp_mutex &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 ppp_mutex &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rhashtable_bucket irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock &tb->tb6_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &base->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex (console_sem).lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &net->xfrm.xfrm_cfg_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 rcu_node_0 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &policy->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &list->lock#31 
irq_context: 0 &net->xfrm.xfrm_cfg_mutex &base->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &xa->xa_lock#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &____s->seqcount#2 irq_context: 0 &fsnotify_mark_srcu pgd_lock irq_context: 0 &fsnotify_mark_srcu stock_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 &fsnotify_mark_srcu key irq_context: 0 &fsnotify_mark_srcu pcpu_lock irq_context: 0 &fsnotify_mark_srcu percpu_counters_lock irq_context: 0 &fsnotify_mark_srcu pcpu_lock stock_lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &hashinfo->ehash_locks[i] irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 batched_entropy_u16.lock irq_context: softirq rcu_read_lock rcu_read_lock &dreq->dreq_lock slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 clock-AF_INET6 irq_context: softirq rcu_read_lock 
rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 sk_lock-AF_INET &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 &dir->lock irq_context: softirq net/netrom/nr_loopback.c:18 &obj_hash[i].lock pool_lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &c->lock irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM pool_lock#2 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM &list->lock#38 irq_context: softirq net/netrom/nr_loopback.c:18 slock-AF_NETROM rlock-AF_NETROM irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NETROM rlock-AF_NETROM irq_context: 0 sk_lock-AF_NETROM clock-AF_NETROM irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NETROM &list->lock#38 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &cookie->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_retransmit_handshake) &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 &ul->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sk_lock-AF_NETROM wlock-AF_NETROM irq_context: 0 sk_lock-AF_NETROM &list->lock#20 irq_context: 0 rtnl_mutex &block->lock &rq->__lock irq_context: 0 rtnl_mutex &block->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock kfence_freelist_lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &wq->mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_lock_key &xa->xa_lock#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem 
ovs_mutex nf_ct_proto_mutex nf_hook_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_node_0 irq_context: 0 pernet_ops_rwsem rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &rnp->exp_wq[1] irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex rcu_node_0 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock kfence_freelist_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock &p->pi_lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock lock#4 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) quarantine_lock irq_context: softirq (&n->timer) &dir->lock irq_context: softirq (&n->timer) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) pool_lock irq_context: 0 cb_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock 
fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock ovs_mutex pool_lock#2 irq_context: 0 cb_lock ovs_mutex krc.lock irq_context: 0 cb_lock ovs_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock ovs_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock ovs_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 cb_lock ovs_mutex krc.lock &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex krc.lock &base->lock irq_context: 0 cb_lock ovs_mutex krc.lock &base->lock &obj_hash[i].lock irq_context: 0 &lo->lo_mutex &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex sk_lock-AF_CAN irq_context: 0 rtnl_mutex sk_lock-AF_CAN slock-AF_CAN irq_context: 0 rtnl_mutex sk_lock-AF_CAN fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_CAN fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_CAN pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_CAN &net->can.rcvlists_lock irq_context: 0 rtnl_mutex slock-AF_CAN irq_context: 0 rtnl_mutex sk_lock-AF_CAN &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex crypto_alg_sem irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex fs_reclaim irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_INET6 tcp_md5sig_mutex pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &sem->wait_lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock stock_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &psock->link_lock irq_context: 0 elock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &psock->link_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock krc.lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 &pipe->mutex/1 
sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &f->f_owner.lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &base->lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &c->lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 &pipe->mutex/1 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock irq_context: 0 &pipe->mutex/1 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 slock-AF_INET6 &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 &f->f_owner.lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET6 slock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 krc.lock irq_context: softirq (&sk->sk_timer)#2 irq_context: softirq (&sk->sk_timer)#2 slock-AF_NETROM irq_context: softirq (&sk->sk_timer)#2 nr_list_lock irq_context: softirq (&sk->sk_timer)#2 &obj_hash[i].lock irq_context: softirq (&sk->sk_timer)#2 wlock-AF_NETROM irq_context: softirq (&sk->sk_timer)#2 &list->lock#20 irq_context: softirq (&sk->sk_timer)#2 rlock-AF_NETROM irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &____s->seqcount irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv4/tcp_ipv4.c:1064 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 (wq_completion)events ((tcp_md5_needed).work).work cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_KCM &c->lock irq_context: 0 sk_lock-AF_INET &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &sem->wait_lock irq_context: 0 sk_lock-AF_KCM &p->pi_lock irq_context: 0 sk_lock-AF_KCM &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_KCM &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &rq->__lock irq_context: 0 sk_lock-AF_KCM &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_KCM rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_KCM rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->lock &f->f_owner.lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->cb_lock irq_context: 0 sk_lock-AF_KCM &n->list_lock irq_context: 0 sk_lock-AF_KCM &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#62 &rq->__lock irq_context: 0 kn->active#62 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#62 fs_reclaim irq_context: 0 kn->active#62 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#62 stock_lock irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#6 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 &xa->xa_lock#4 irq_context: 0 &type->i_mutex_dir_key#6 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#6 stock_lock irq_context: 0 &type->i_mutex_dir_key#6 &dentry->d_lock &wq irq_context: 0 &type->i_mutex_dir_key#6 &rq->__lock irq_context: 0 &type->i_mutex_dir_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock &c->lock irq_context: 0 sk_lock-AF_KCM &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &journal->j_wait_transaction_locked irq_context: 0 rtnl_mutex &tn->idrinfo->lock#4 &c->lock irq_context: 0 tracepoints_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#62 &c->lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &____s->seqcount#2 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &____s->seqcount irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 batched_entropy_u8.lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 kfence_freelist_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &meta->lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 fill_pool_map-wait-type-override pool_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 rtnl_mutex subsys mutex#17 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_node_0 irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &cfs_rq->removed.lock irq_context: 0 kn->active#62 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 rtnl_mutex batched_entropy_u8.lock crngs.lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock genl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &n->list_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 tomoyo_ss &meta->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 
&p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#6 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex pool_lock#2 irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &u->peer_wait &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 
sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &tbl->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &(&n->ha_lock)->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock lock#8 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock id_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 
softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock tcp_metrics_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) once_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) once_lock crngs.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem crngs.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &id_priv->handler_mutex &id_priv->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem id_table_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &x->wait#28 irq_context: 0 
(wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_tcp_conn_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock rds_conn_lock rds_cong_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock clock-AF_INET6 rds_tcp_tc_list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &tc->t_conn_path_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd 
(work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &base->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &rm->m_rs_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_recv_w)->work) k-slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &ep->mtx remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &list->lock#45 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &cp->cp_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 
rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &root->kernfs_rwsem rcu_node_0 irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &pcp->lock &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &sd->defer_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &n->list_lock &c->lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim &rq->__lock irq_context: 0 &xs->mutex &mm->mmap_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_node_0 irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_es_lock key#8 irq_context: 0 &xs->mutex &mm->mmap_lock ptlock_ptr(page) irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 fs_reclaim irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 pool_lock#2 irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 rtnl_mutex k-sk_lock-AF_INET6 &idev->mc_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex &lock->wait_lock irq_context: 0 &xs->mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &map->freeze_mutex irq_context: 0 &mm->mmap_lock &map->freeze_mutex &vma->vm_lock->lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex vmap_area_lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex fs_reclaim irq_context: 0 &mm->mmap_lock &map->freeze_mutex fs_reclaim &rq->__lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &mm->mmap_lock &map->freeze_mutex &____s->seqcount irq_context: 0 &mm->mmap_lock &map->freeze_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock &map->freeze_mutex stock_lock irq_context: 0 &mm->mmap_lock &map->freeze_mutex ptlock_ptr(page) irq_context: 0 &mm->mmap_lock &map->freeze_mutex ptlock_ptr(page)#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock 
rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_cm_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 clock-AF_INET6 rds_tcp_tc_list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-slock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &dir->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &xa->xa_lock#8 irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &fsnotify_mark_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &cp->cp_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) (work_completion)(&(&cp->cp_conn_w)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 key#23 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override pool_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 tasklist_lock &sighand->siglock &(&sig->stats_lock)->lock &____s->seqcount#5 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) &c->lock irq_context: 0 rtnl_mutex &net->xdp.lock &xs->mutex irq_context: 0 rtnl_mutex &net->xdp.lock 
&xs->mutex &lock->wait_lock irq_context: 0 rtnl_mutex &net->xdp.lock &xs->mutex &rq->__lock irq_context: 0 rtnl_mutex &net->xdp.lock &xs->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NFC llcp_devices_lock irq_context: 0 sk_lock-AF_NFC &rq->__lock irq_context: 0 sk_lock-AF_NFC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_NFC fs_reclaim irq_context: 0 sk_lock-AF_NFC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_NFC pool_lock#2 irq_context: 0 sk_lock-AF_NFC &local->sdp_lock irq_context: 0 sk_lock-AF_NFC &local->sdp_lock &local->sockets.lock irq_context: 0 sk_lock-AF_NFC &local->sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC slock-AF_NFC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_NFC &local->sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_NFC irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 quarantine_lock irq_context: 0 &xs->mutex &mm->mmap_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &net->xdp.lock &lock->wait_lock irq_context: 0 rtnl_mutex &net->xdp.lock &p->pi_lock irq_context: 0 rtnl_mutex &net->xdp.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &net->xdp.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &net->xdp.lock &rq->__lock irq_context: 0 rtnl_mutex &net->xdp.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rq->__lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &list->lock#45 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &sd->defer_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 tk_core.seq.seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait irq_context: 
softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_node_0 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq slock-AF_INET6 &base->lock irq_context: softirq slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &pcp->lock &zone->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&cp->cp_down_w) &list->lock#45 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount 
irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &____s->seqcount#11 irq_context: 0 &sb->s_type->i_mutex_key#10 &ping_table.lock irq_context: 0 &nft_net->commit_mutex (console_sem).lock irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner_lock irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &nft_net->commit_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &nft_net->commit_mutex &rnp->exp_lock irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex irq_context: 0 &nft_net->commit_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex nf_ct_proto_mutex irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &smc->clcsock_release_lock &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 batched_entropy_u8.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock 
irq_context: softirq (&n->timer) rcu_read_lock lock#8 irq_context: softirq (&n->timer) &n->lock &____s->seqcount#9 irq_context: 0 sk_lock-AF_INET rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock irq_context: 0 sk_lock-AF_INET &sighand->siglock stock_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock pool_lock#2 irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock pool_lock#2 irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock pgd_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock stock_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock key irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock pcpu_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock percpu_counters_lock irq_context: 0 &xs->mutex &mm->mmap_lock rcu_read_lock pcpu_lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6/1 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&umem->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex deferred_probe_mutex &rq->__lock irq_context: 0 rtnl_mutex deferred_probe_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq 
(work_completion)(&ndev_work->work) devices_rwsem &table->lock#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &base->lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &im->lock irq_context: 0 &smc->clcsock_release_lock nf_sockopt_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex ip_vs_sched_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex stock_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex cpu_hotplug_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex nf_hook_mutex &c->lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pcpu_alloc_mutex irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex &c->lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &smc->clcsock_release_lock 
__ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &smc->clcsock_release_lock __ip_vs_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex fs_reclaim irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex kthread_create_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &x->wait irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &pool->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex (console_sem).lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)events_long (work_completion)(&(&ipvs->est_reload_work)->work) ipvs->est_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 __ip_vs_mutex irq_context: 0 &wq#4 irq_context: 0 rcu_read_lock &s->lock irq_context: 0 __ip_vs_mutex ipvs->est_mutex irq_context: 0 __ip_vs_mutex (console_sem).lock irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner_lock irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 __ip_vs_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 __ip_vs_mutex fs_reclaim irq_context: 0 __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &xs->mutex &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 __ip_vs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 
__ip_vs_mutex &c->lock irq_context: 0 __ip_vs_mutex pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 
(wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 &lruvec->lru_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 clock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#8 &type->i_mutex_dir_key#4 &root->kernfs_rwsem &p->pi_lock irq_context: softirq (&p->timer) irq_context: softirq (&p->timer) &br->multicast_lock irq_context: softirq (&p->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&p->timer) &br->multicast_lock deferred_lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&p->timer) &br->multicast_lock nl_table_lock irq_context: softirq (&p->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&p->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&p->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&p->timer) &br->multicast_lock &base->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock irq_context: softirq (&p->timer) &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &local->handle_wake_tx_queue_lock hwsim_radio_lock &n->list_lock &c->lock irq_context: softirq (&p->timer) &br->multicast_lock &c->lock irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&p->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&mp->timer) irq_context: softirq (&mp->timer) &br->multicast_lock irq_context: softirq (&mp->timer) &br->multicast_lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock &c->lock irq_context: softirq (&mp->timer) &br->multicast_lock &dir->lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock deferred_lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_lock irq_context: softirq (&mp->timer) &br->multicast_lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock nl_table_wait.lock irq_context: softirq (&mp->timer) 
&br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock irq_context: softirq (&mp->timer) &br->multicast_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &br->multicast_lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->rexmit_timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &base->lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&p->timer) irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) krc.lock irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) (&mp->timer) irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long 
__ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&(&cp->cp_send_w)->work) k-sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock 
irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 sk_lock-AF_ALG fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_ALG fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 pcpu_alloc_mutex free_vmap_area_lock irq_context: 0 pcpu_alloc_mutex vmap_area_lock irq_context: 0 pcpu_alloc_mutex init_mm.page_table_lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock pool_lock#2 irq_context: 0 pcpu_alloc_mutex &c->lock irq_context: 0 pcpu_alloc_mutex &n->list_lock irq_context: 0 pcpu_alloc_mutex &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex rcu_node_0 irq_context: 0 pcpu_alloc_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (&peer->timer_retransmit_handshake) irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&wg->device_update_lock &base->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (&peer->timer_send_keepalive) irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (&peer->timer_new_handshake) irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (&peer->timer_zero_key_material) irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (&peer->timer_persistent_keepalive) irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (work_completion)(&peer->clear_peer_work) irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (wq_completion)wg-crypt-wg1#3 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wq->mutex irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wq->mutex &pool->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wq->mutex &x->wait#10 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wq->mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock napi_hash_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock (wq_completion)wg-kex-wg1#5 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wq->mutex &pool->lock/1 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &table->lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock pcpu_alloc_mutex.wait_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET clock-AF_INET &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &mux->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kstrp irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 sk_lock-AF_INET irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 sk_lock-AF_INET slock-AF_INET irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 sk_lock-AF_INET slock-AF_INET &sk->sk_lock.wq irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 sk_lock-AF_INET &rq->__lock irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 sk_lock-AF_INET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 sk_lock-AF_INET tk_core.seq.seqcount irq_context: 0 (wq_completion)kstrp (work_completion)(&strp->work)#2 slock-AF_INET irq_context: 0 pcpu_alloc_mutex &____s->seqcount#2 irq_context: 0 pcpu_alloc_mutex fs_reclaim &rq->__lock irq_context: 0 pcpu_alloc_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pcpu_alloc_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM slock-AF_KCM &sk->sk_lock.wq irq_context: 0 pcpu_alloc_mutex &pcp->lock &zone->lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_KCM &mm->mmap_lock &c->lock irq_context: 0 pcpu_alloc_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 slock-AF_KCM &sk->sk_lock.wq irq_context: 0 slock-AF_KCM &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_KCM &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_KCM &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_KCM sk_lock-AF_INET irq_context: 0 sk_lock-AF_KCM sk_lock-AF_INET slock-AF_INET irq_context: 0 sk_lock-AF_KCM slock-AF_INET irq_context: 0 
pcpu_alloc_mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 pcpu_alloc_mutex free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex purge_vmap_area_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &pool->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &pcp->lock &zone->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM &mux->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock &ei->socket.wq.wait irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_KCM rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &sighand->siglock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET clock-AF_INET &mux->rx_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&(&strp->msg_timer_work)->work) irq_context: 0 &sb->s_type->i_mutex_key#10 (work_completion)(&strp->work)#2 irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET prog_idr_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET prog_idr_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET prog_idr_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET bpf_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &mux->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 &____s->seqcount#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) &pcp->lock &zone->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rds_trans_sem &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &cfs_rq->removed.lock irq_context: 0 &x->wait#29 irq_context: 0 &x->wait#29 &p->pi_lock irq_context: 0 &x->wait#29 &p->pi_lock &rq->__lock irq_context: 0 &x->wait#29 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex &x->wait#24 irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &x->wait irq_context: 0 
(wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &r->consumer_lock#5 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 data_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#10 console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock k-slock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 data_sockets.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN slock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN clock-AF_ISDN irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_ISDN rlock-AF_ISDN irq_context: 0 &sb->s_type->i_mutex_key#10 slock-AF_ISDN irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh 
&list->lock#14 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq 
(&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) rcu_state.barrier_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&old_rcpu->kthread_stop_wq) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#14 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &dir->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh stock_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 sk_lock-AF_INET6 
rcu_read_lock &sighand->siglock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock stock_lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &meta->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 batched_entropy_u32.lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key#2 krc.lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work kfence_freelist_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) remove_cache_srcu irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) remove_cache_srcu &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &nf_conntrack_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: 0 &group->mark_mutex batched_entropy_u8.lock irq_context: 0 &group->mark_mutex kfence_freelist_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &u->iolock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &nft_net->commit_mutex defrag4_mutex irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 kn->active#63 fs_reclaim irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock slock-AF_INET6/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 (wq_completion)krdsd (work_completion)(&rtn->rds_tcp_accept_w) k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 kn->active#63 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#63 stock_lock irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#63 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key#7 &dentry->d_lock &wq#2 irq_context: 0 kn->active#64 fs_reclaim 
irq_context: 0 kn->active#64 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#64 stock_lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 remove_cache_srcu &obj_hash[i].lock pool_lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#64 &c->lock irq_context: softirq (&p->timer) &br->multicast_lock &____s->seqcount#2 irq_context: softirq (&p->timer) &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)events_long (work_completion)(&br->mcast_gc_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex lock kernfs_idr_lock &c->lock irq_context: 0 sb_writers#11 &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock irq_context: 0 sb_writers#11 &of->mutex 
cgroup_mutex cpu_hotplug_lock fs_reclaim irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock cgroup_file_kn_lock irq_context: softirq (&app->join_timer) rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock css_set_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &dir->lock#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->alloc_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex &p->alloc_lock &____s->seqcount#2 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cpuset_mutex cpuset_attach_wq.lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex css_set_lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex fs_reclaim irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex pool_lock#2 irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex &base->lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex &rq->__lock irq_context: 0 sb_writers#11 &p->lock &of->mutex kn->active#64 &cgrp->pidlist_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 rcu_node_0 irq_context: 0 sb_writers#11 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock cpuset_mutex irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock 
cpuset_mutex fs_reclaim irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock cpuset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock cpuset_mutex &c->lock irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock cpuset_mutex pool_lock#2 irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock cpuset_mutex callback_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock cpuset_mutex css_set_lock irq_context: 0 sb_writers#11 &of->mutex kn->active#63 cpu_hotplug_lock cpuset_mutex &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key#7 &xa->xa_lock#4 irq_context: 0 &type->i_mutex_dir_key#7 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex defrag4_mutex irq_context: 0 css_set_lock cgroup_file_kn_lock irq_context: softirq rcu_callback css_set_lock irq_context: softirq rcu_callback css_set_lock &obj_hash[i].lock irq_context: softirq rcu_callback css_set_lock pool_lock#2 irq_context: softirq rcu_callback css_set_lock krc.lock irq_context: 0 cb_lock genl_mutex &nbd->config_lock &obj_hash[i].lock pool_lock irq_context: 0 kn->active#64 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_node_0 irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem &obj_hash[i].lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem.rss.gp_wait.lock &obj_hash[i].lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &n->list_lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 &root->kernfs_rwsem &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 proto_tab_lock 
raw_sk_list.lock irq_context: 0 &sb->s_type->i_mutex_key#10 raw_sk_list.lock irq_context: 0 kn->active#63 &c->lock irq_context: 0 &xt[i].mutex remove_cache_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#11 &of->mutex cgroup_mutex cpu_hotplug_lock cgroup_threadgroup_rwsem rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 batched_entropy_u32.lock irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 tk_core.seq.seqcount irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 &c->lock irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 pool_lock#2 irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 &obj_hash[i].lock irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 &base->lock irq_context: softirq (&sk->sk_timer) k-slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_raw_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &lg->lg_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: softirq &(&l->destroy_dwork)->timer irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex.wait_lock irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex &p->pi_lock irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex 
&p->pi_lock &rq->__lock irq_context: softirq &(&l->destroy_dwork)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)cgroup_pidlist_destroy irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &cgrp->pidlist_mutex pool_lock#2 irq_context: 0 (wq_completion)cgroup_pidlist_destroy (work_completion)(&(&l->destroy_dwork)->work) &obj_hash[i].lock irq_context: softirq rcu_callback prog_idr_lock irq_context: softirq rcu_callback bpf_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock &pa->pa_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle stock_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &xa->xa_lock#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle lock#4 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &dd->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 
&sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &memcg->move_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &xa->xa_lock#8 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#7 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 tracepoints_mutex pool_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &cfs_rq->removed.lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock static_call_mutex text_mutex pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET &im->lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET krc.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET batched_entropy_u8.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &im->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pcpu_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &mapping->private_lock irq_context: 0 &map->freeze_mutex irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_node_0 irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &dev->power.lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock crngs.lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_gp_mutex &ssp->srcu_sup->srcu_cb_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex wq_mayday_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 &n->list_lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex &pool->lock/1 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex.wait_lock irq_context: 0 nlk_cb_mutex-GENERIC &p->pi_lock irq_context: 0 nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock irq_context: 0 nlk_cb_mutex-GENERIC &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock pool_lock#2 irq_context: 0 &xt[i].mutex &base->lock irq_context: 0 &xt[i].mutex &base->lock &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &u->lock irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &u->lock &u->peer_wait irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &ws->lock irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &ws->lock tk_core.seq.seqcount irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &ws->lock &obj_hash[i].lock irq_context: 0 epnested_mutex &ep->mtx &ep->mtx/1 &ep->lock &ws->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock &ep->wq irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock &ep->wq &p->pi_lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 &u->iolock &u->peer_wait &ei->socket.wq.wait &ep->poll_wait/1 &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx 
&ep->mtx/1 irq_context: 0 &ep->mtx &ep->mtx/1 &ep->lock irq_context: 0 &ep->mtx &ep->mtx/1 &u->lock irq_context: 0 &ep->mtx &ep->mtx/1 &u->lock &u->peer_wait irq_context: 0 &ep->mtx &ep->mtx/1 &ws->lock irq_context: 0 &ep->mtx &ep->mtx/1 &ws->lock tk_core.seq.seqcount irq_context: 0 &ep->mtx &ep->mtx/1 &ws->lock &obj_hash[i].lock irq_context: 0 &ep->mtx &ep->mtx/1 &ep->lock &ws->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &im->lock irq_context: 0 sk_lock-AF_CAN &mm->mmap_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_grp_active_lock irq_context: 0 &sb->s_type->i_mutex_key#10 nfnl_grp_active_lock irq_context: 0 &ep->mtx &ep->mtx/1 &ep->lock &ws->lock &obj_hash[i].lock irq_context: 0 &ep->mtx &ep->mtx/1 &rq->__lock irq_context: 0 &ep->mtx &ep->mtx/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ep->mtx &ep->mtx/1 &ep->lock &ws->lock tk_core.seq.seqcount irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 tomoyo_ss &n->list_lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &obj_hash[i].lock pool_lock irq_context: 0 &rdma_nl_types[idx].sem devices_rwsem irq_context: 0 rtnl_mutex rcu_read_lock &batadv_netdev_addr_lock_key irq_context: 0 rtnl_mutex (&pmctx->ip6_mc_router_timer) irq_context: 0 rtnl_mutex (&pmctx->ip4_mc_router_timer) irq_context: 0 rtnl_mutex &tn->idrinfo->lock#6 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#6 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#6 &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#6 pool_lock#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &bridge_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &bond->stats_lock/2 irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock rcu_node_0 
irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &group->mark_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &br->lock kfence_freelist_lock irq_context: 0 rtnl_mutex (&br->hello_timer) irq_context: 0 rtnl_mutex (&br->topology_change_timer) irq_context: 0 rtnl_mutex (&br->tcn_timer) irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock (console_sem).lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock console_lock console_srcu console_owner_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock console_lock console_srcu console_owner irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &q->lock#2 pool_lock#2 irq_context: softirq (&peer->timer_send_keepalive) 
&n->list_lock irq_context: softirq (&peer->timer_send_keepalive) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &local->queue_stop_reason_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock &list->lock#16 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &list->lock#16 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &data->mutex irq_context: 0 cb_lock &rdev->wiphy.mtx 
&wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &lock->wait_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &rq->__lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock hwsim_radio_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &list->lock#16 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx 
&wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &sta->lock krc.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount#2 irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &____s->seqcount irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx pool_lock#2 irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy6 
(work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy6 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 cb_lock &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rdev->bss_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx krc.lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &c->lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx &n->list_lock &c->lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_lock irq_context: 0 (wq_completion)phy4 (work_completion)(&link->csa_finalize_work) &wdev->mtx &local->mtx &local->chanctx_mtx nl_table_wait.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_NETROM rcu_read_lock &sec->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults 
mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 &bdi->wb_waitq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->i_mmap_rwsem ptlock_ptr(page)#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->private_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &memcg->move_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock &xa->xa_lock#8 &s->s_inode_wblist_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &mapping->i_mmap_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_pagefaults &sb->s_type->i_lock_key#22 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock 
irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &bat_priv->mcast.mla_lock console_owner_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &n->list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex &n->list_lock &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh k-slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &n->list_lock irq_context: 0 rtnl_mutex &br->lock &br->hash_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_wait_transaction_locked irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &meta->lock irq_context: 0 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
rtnl_mutex bpf_devs_lock stock_lock irq_context: 0 rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 rtnl_mutex bpf_devs_lock &____s->seqcount irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex bpf_devs_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex bpf_devs_lock &rq->__lock irq_context: 0 bpf_devs_lock irq_context: 0 bpf_devs_lock &nmap->mutex irq_context: 0 bpf_devs_lock &nmap->mutex &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock stock_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&peer->transmit_packet_work) batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_devs_lock &c->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rtnl_mutex.wait_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 
sk_lock-AF_INET6/1 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: softirq (&n->timer) &n->lock batched_entropy_u8.lock irq_context: softirq (&n->timer) &n->lock kfence_freelist_lock irq_context: 0 sk_lock-AF_AX25 ax25_uid_lock irq_context: 0 cb_lock genl_mutex &bat_priv->tp_list_lock irq_context: 0 cb_lock genl_mutex &bat_priv->tp_list_lock &c->lock irq_context: 0 cb_lock genl_mutex &bat_priv->tp_list_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex &bat_priv->tp_list_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex kthread_create_lock irq_context: 0 cb_lock genl_mutex &x->wait irq_context: 0 
rcu_read_lock rcu_read_lock_bh icmp_global.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh icmp_global.lock batched_entropy_u8.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &ul->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET batched_entropy_u32.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &____s->seqcount#10 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &____s->seqcount#2 irq_context: 0 &bat_priv->tp_list_lock irq_context: 0 (&tp_vars->timer) irq_context: 0 &tp_vars->unacked_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock key#22 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &____s->seqcount#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh icmp_global.lock batched_entropy_u8.lock crngs.lock irq_context: 0 proto_tab_lock &n->list_lock irq_context: 0 proto_tab_lock &n->list_lock &c->lock irq_context: softirq drivers/net/wireguard/ratelimiter.c:20 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &h->lhash2[i].lock k-clock-AF_INET6 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG inet_diag_table_mutex &cfs_rq->removed.lock irq_context: 0 rds_cong_monitor_lock irq_context: 0 &rs->rs_recv_lock irq_context: 0 &rs->rs_recv_lock &rs->rs_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh k-slock-AF_INET &pcp->lock &zone->lock irq_context: softirq fs/notify/mark.c:89 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock &c->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 cb_lock genl_mutex &fn->fou_lock irq_context: 0 sb_writers#8 &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &br->lock &____s->seqcount irq_context: 0 rtnl_mutex &br->lock rcu_read_lock pool_lock#2 irq_context: 0 &ppp->wlock &obj_hash[i].lock irq_context: 0 &ppp->wlock pool_lock#2 irq_context: 0 (wq_completion)tipc_send#3 irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &con->outqueue_lock irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &c->lock irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) pool_lock#2 irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) tk_core.seq.seqcount irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &list->lock#5 irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &list->lock#42 irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) slock-AF_TIPC &list->lock#42 irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) slock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &con->outqueue_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &n->list_lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &n->list_lock &c->lock irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) slock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &list->lock#23 irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) slock-AF_TIPC &list->lock#23 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC &tn->nametbl_lock &c->lock irq_context: 0 (wq_completion)tipc_send#3 
(work_completion)(&con->swork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &srv->idr_lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_send#3 (work_completion)(&con->swork) &obj_hash[i].lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_node_0 irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &root->kernfs_rwsem pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev_addr_list_lock_key#2/1 irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev_addr_list_lock_key#2/1 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev_addr_list_lock_key#2/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev_addr_list_lock_key#2/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev_addr_list_lock_key#2/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &pn->hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev->tx_global_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex team->team_lock_key#5 &sch->q.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lweventlist_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &base->lock 
irq_context: 0 rtnl_mutex team->team_lock_key#5 &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#5 __ip_vs_mutex irq_context: 0 rtnl_mutex team->team_lock_key#5 __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 class irq_context: 0 rtnl_mutex team->team_lock_key#5 (&tbl->proxy_timer) irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &ht->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock rcu_read_lock &ht->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &(&flowtable->gc_work)->timer irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock (wq_completion)nf_ft_offload_add irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &wq->mutex irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock (wq_completion)nf_ft_offload_del irq_context: 0 rtnl_mutex team->team_lock_key#5 flowtable_lock (wq_completion)nf_ft_offload_stats irq_context: 0 rtnl_mutex team->team_lock_key#5 &dir->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem 
quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &ul->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &ifa->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &tb->tb6_lock irq_context: 0 rtnl_mutex 
team->team_lock_key#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &dir->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_query_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock &n->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock &n->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock nl_table_lock irq_context: 0 rtnl_mutex 
team->team_lock_key#5 &tbl->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock rcu_read_lock lock#8 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock rcu_read_lock id_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock &dir->lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tbl->lock krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: softirq rcu_callback &n->list_lock irq_context: softirq rcu_callback &n->list_lock &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->unx.table.locks[i] irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->unx.table.locks[i] &u->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->unx.table.locks[i] &u->lock clock-AF_UNIX irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->unx.table.locks[i] clock-AF_UNIX irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->unx.table.locks[i] rlock-AF_UNIX irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->unx.table.locks[i] rlock-AF_UNIX &u->lock/1 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->unx.table.locks[i] rlock-AF_UNIX &u->lock/1 clock-AF_UNIX irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 pcpu_alloc_mutex irq_context: 0 rtnl_mutex team->team_lock_key#5 pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#5 (inet6addr_validator_chain).rwsem irq_context: 0 rtnl_mutex team->team_lock_key#5 stock_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 pcpu_lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 &ndev->lock &ifa->lock irq_context: 0 rtnl_mutex team->team_lock_key#5 lock kernfs_idr_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#5 &dev_addr_list_lock_key#2/1 _xmit_ETHER &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex (console_sem).lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex pgd_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex key irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex pcpu_lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex nl_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex nl_table_wait.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &batadv_netdev_addr_lock_key/1 irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &n->list_lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &n->list_lock &c->lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &rq->__lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sock_diag_mutex sock_diag_table_mutex rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ax25_uid_lock irq_context: 0 sk_lock-AF_AX25 ax25_list_lock irq_context: 0 sk_lock-AF_AX25 &rq->__lock irq_context: 0 sk_lock-AF_AX25 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_IPGRE#2 irq_context: 0 rtnl_mutex &r->consumer_lock#6 irq_context: 0 rtnl_mutex &r->consumer_lock#6 &r->producer_lock#3 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_TUNNEL6#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock _xmit_SIT#2 irq_context: softirq rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock irq_context: softirq rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 irq_context: 0 rtnl_mutex dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 &sch->q.lock irq_context: 0 rtnl_mutex &dev->tx_global_lock 
&qdisc_xmit_lock_key#5 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &meta->lock irq_context: 0 rtnl_mutex &r->consumer_lock &r->producer_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &r->consumer_lock &r->producer_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 lock#3 &cfs_rq->removed.lock irq_context: 0 &fp->aux->used_maps_mutex fs_reclaim irq_context: 0 &fp->aux->used_maps_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &fp->aux->used_maps_mutex &c->lock irq_context: 0 &fp->aux->used_maps_mutex pool_lock#2 irq_context: 0 &fp->aux->used_maps_mutex &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock &____s->seqcount irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock rlock-AF_KEY irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#16 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#16 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock krc.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &meta->lock irq_context: 0 rtnl_mutex rcu_read_lock 
fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &n->list_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &n->list_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC ovs_mutex irq_context: 0 &nft_net->commit_mutex cpu_hotplug_lock irq_context: 0 &nft_net->commit_mutex cpu_hotplug_lock jump_label_mutex irq_context: 0 &nft_net->commit_mutex cpu_hotplug_lock jump_label_mutex text_mutex irq_context: 0 &nft_net->commit_mutex cpu_hotplug_lock jump_label_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 &nft_net->commit_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock irq_context: 0 &nft_net->commit_mutex cpu_hotplug_lock jump_label_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET free_vmap_area_lock irq_context: 0 sk_lock-AF_INET vmap_area_lock irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_INET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_INET pack_mutex irq_context: 0 sk_lock-AF_INET text_mutex irq_context: 0 sk_lock-AF_INET text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_INET &fp->aux->used_maps_mutex irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 sk_lock-AF_INET hrtimer_bases.lock irq_context: 0 sk_lock-AF_INET hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &f->f_owner.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &table->lock#3 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock &____s->seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 
sk_lock-AF_BLUETOOTH-BTPROTO_HCI mgmt_chan_list_lock &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM rfcomm_ioctl_mutex irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM rfcomm_ioctl_mutex &mm->mmap_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &xs->mutex &obj_hash[i].lock irq_context: 0 sock_diag_mutex sock_diag_table_mutex nlk_cb_mutex-SOCK_DIAG &net->packet.sklist_lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock nl_table_lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PPPOX chan_lock irq_context: 0 ebt_mutex ebt_mutex.wait_lock irq_context: 0 ebt_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC &ht->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &ht->lock irq_context: 0 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_AX25 &ei->socket.wq.wait irq_context: 0 sk_lock-AF_AX25 rlock-AF_AX25 irq_context: 0 sk_lock-AF_AX25 rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_AX25 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_AX25 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_AX25 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_AX25 rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_AX25 rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_lock_key#22 bit_wait_table + i irq_context: 0 cb_lock &rdev->wiphy.mtx &local->mtx rcu_read_lock hwsim_radio_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &pcp->lock &zone->lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &mm->mmap_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 rtnl_mutex sk_lock-AF_INET6 pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &____s->seqcount irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &dir->lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock pcpu_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 acaddr_hash_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rt6_exception_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &ndev->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex acaddr_hash_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rt6_exception_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex 
&tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock nl_table_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock nl_table_wait.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock &meta->lock irq_context: 0 nf_sockopt_mutex pgd_lock irq_context: 0 nf_sockopt_mutex stock_lock irq_context: 0 nf_sockopt_mutex rcu_read_lock pool_lock#2 irq_context: 0 nf_sockopt_mutex &obj_hash[i].lock irq_context: 0 nf_sockopt_mutex key irq_context: 0 nf_sockopt_mutex pcpu_lock irq_context: 0 nf_sockopt_mutex percpu_counters_lock irq_context: 0 nf_sockopt_mutex pcpu_lock stock_lock irq_context: 0 nf_sockopt_mutex pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem batched_entropy_u8.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem kfence_freelist_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem quarantine_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &journal->j_state_lock 
&journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle irq_context: 0 rtnl_mutex &idev->mc_lock _xmit_ETHER &____s->seqcount#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock stock_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &dir->lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 rtnl_mutex sk_lock-AF_INET6 remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv6/ip6_flowlabel.c:57 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &tb->tb6_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex pgd_lock irq_context: 0 cb_lock rtnl_mutex stock_lock irq_context: 0 cb_lock rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex key irq_context: 0 cb_lock rtnl_mutex pcpu_lock irq_context: 0 cb_lock rtnl_mutex percpu_counters_lock irq_context: 0 cb_lock rtnl_mutex pcpu_lock stock_lock irq_context: 0 cb_lock rtnl_mutex pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex fs_reclaim &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &n->list_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &ndev->lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 
sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 mapping.invalidate_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &p->alloc_lock irq_context: 0 cb_lock &devlink->lock_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#3 &fsnotify_mark_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 &xa->xa_lock#8 irq_context: 0 cb_lock &devlink->lock_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex netpoll_srcu irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex netpoll_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex netpoll_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex net_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 
rtnl_mutex &pn->hash_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tn->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &dev->tx_global_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &dev->tx_global_lock _xmit_ETHER#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sch->q.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex __ip_vs_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex _xmit_ETHER &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex _xmit_ETHER krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &im->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex class irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (&tbl->proxy_timer) irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &base->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &net->xfrm.xfrm_state_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &net->xfrm.xfrm_policy_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &ht->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock rcu_read_lock &ht->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &(&flowtable->gc_work)->timer irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex 
flowtable_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &base->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_add irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &wq->mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &wq->mutex &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &wq->mutex &x->wait#10 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_del irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock (wq_completion)nf_ft_offload_stats irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &dir->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex 
rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &ul->lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock &n->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock &n->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock rcu_read_lock lock#8 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock rcu_read_lock id_table_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &net->ipv6.addrconf_hash_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &ndev->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &ndev->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &ndev->lock &base->lock irq_context: 0 cb_lock 
&devlink->lock_key#3 rtnl_mutex &ndev->lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &ifa->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &dir->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_query_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 
cb_lock &devlink->lock_key#3 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_base_lock &xa->xa_lock#3 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex cpu_hotplug_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex cpu_hotplug_lock &list->lock#5 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &in_dev->mc_tomb_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &ul->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &net->xdp.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex mirred_list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &nft_net->commit_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &ent->pde_unload_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_report_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &pnn->pndevs.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &pnn->routes.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &pnettable->lock irq_context: 0 
cb_lock &devlink->lock_key#3 rtnl_mutex smc_ib_devices.mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex target_list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (console_sem).lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &k->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex 
batched_entropy_u8.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &meta->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex subsys mutex#17 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex subsys mutex#17 &k->k_lock klist_remove_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &x->wait#9 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &dev->power.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex deferred_probe_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex device_links_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex cpu_hotplug_lock xps_map_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 pin_fs_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &s->s_inode_list_lock irq_context: 
0 cb_lock &devlink->lock_key#3 rtnl_mutex &xa->xa_lock#8 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex mount_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex mount_lock mount_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock rcu_read_lock rhashtable_bucket irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#3 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.barrier_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.barrier_mutex rcu_state.barrier_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.barrier_mutex rcu_state.barrier_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.barrier_mutex &x->wait#24 irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.barrier_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.barrier_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 dev_base_lock irq_context: 0 cb_lock &devlink->lock_key#3 lweventlist_lock irq_context: 0 cb_lock &devlink->lock_key#3 netdev_unregistering_wq.lock irq_context: 0 cb_lock &devlink->lock_key#3 krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 &dir->lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &dir->lock#2 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 &dir->lock#2 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock &dentry->d_lock/1 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &xa->xa_lock#8 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#3 &base->lock irq_context: 0 cb_lock &devlink->lock_key#3 &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&(&devlink_port->type_warn_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 &devlink_port->type_lock irq_context: 0 cb_lock &devlink->lock_key#3 nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#3 nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#3 &xa->xa_lock#15 irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex 
rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex flowtable_lock &x->wait#10 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sem->wait_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &sem->wait_lock 
irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &xa->xa_lock#15 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 &xa->xa_lock#15 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex fib_info_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex fib_info_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex fib_info_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem nl_table_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem nl_table_wait.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem fib_info_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &tbl->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem class irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem (&tbl->proxy_timer) irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &base->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock irq_context: 
0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex (inetaddr_chain).rwsem krc.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 rtnl_mutex &xs->mutex irq_context: 0 rtnl_mutex &xs->mutex fs_reclaim irq_context: 0 rtnl_mutex &xs->mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &xs->mutex pool_lock#2 irq_context: 0 rtnl_mutex &xs->mutex &zone->lock irq_context: 0 rtnl_mutex &xs->mutex &____s->seqcount irq_context: 0 rtnl_mutex &xs->mutex &pool->xsk_tx_list_lock irq_context: 0 &xs->mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 &xs->mutex free_vmap_area_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 &rcu_state.expedited_wq irq_context: 0 cb_lock &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&(&hwstats->traffic_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex &xs->mutex &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex &xs->mutex &c->lock irq_context: 0 rtnl_mutex &xs->mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &xs->mutex rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &pool->xsk_tx_list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 &hwstats->hwsdev_list_lock irq_context: 0 cb_lock &devlink->lock_key#3 
&____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &(&fn_net->fib_chain)->lock irq_context: 0 cb_lock &devlink->lock_key#3 &rnp->exp_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex rcu_state.exp_mutex.wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &xs->mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &(&net->nexthop.notifier_chain)->rwsem irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&data->fib_flush_work) irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&data->fib_event_work) irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&ht->run_work) irq_context: 0 cb_lock &devlink->lock_key#3 &ht->mutex irq_context: 0 cb_lock &devlink->lock_key#3 &ht->mutex &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 &ht->mutex pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&pool->work) irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&pool->work) vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) purge_vmap_area_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) purge_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) purge_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &nsim_trap_data->trap_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 (wq_completion)events (work_completion)(&pool->work) pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pool->work) umem_ida.xa_lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) rcu_read_lock &pool->lock irq_context: 0 
(wq_completion)events (work_completion)(&pool->work) rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&pool->work) &zone->lock &____s->seqcount irq_context: 0 purge_vmap_area_lock &meta->lock irq_context: 0 purge_vmap_area_lock kfence_freelist_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &meta->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#3 pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#3 &region->snapshot_lock irq_context: 0 cb_lock &devlink->lock_key#3 pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#3 pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &xa->xa_lock#4 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &data->fib_event_queue_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &n->list_lock irq_context: 0 cb_lock
&devlink->lock_key#3 rcu_read_lock &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &rcu_state.gp_wq irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PACKET stock_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#3 stack_depot_init_mutex irq_context: 0 cb_lock &devlink->lock_key#3 crngs.lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 stock_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &s->s_inode_list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &sb->s_type->i_lock_key#7 &dentry->d_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex 
&sb->s_type->i_mutex_key#3 &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &xa->xa_lock#3 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex kobj_ns_type_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bus_type_sem irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex input_pool.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex stock_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex failover_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &vn->sock_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock (console_sem).lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock &devlink->lock_key#3 &xa->xa_lock#15 &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key#3 
&sb->s_type->i_mutex_key#3 &pcp->lock &zone->lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex bpf_devs_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &n->list_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &rnp->exp_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 bpf_stats_enabled_mutex cpu_hotplug_lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events &cfs_rq->removed.lock irq_context: 0 (wq_completion)events &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock krc.lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock krc.lock &base->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex sysctl_lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &trie->lock krc.lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rcu_read_lock &trie->lock krc.lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#8 rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 fill_pool_map-wait-type-override pool_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 &x->wait#10 irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_unbound (reaper_work).work fill_pool_map-wait-type-override pool_lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock 
remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &rq->__lock irq_context: 0 wq_pool_attach_mutex wq_pool_attach_mutex.wait_lock irq_context: 0 wq_pool_attach_mutex &rq->__lock irq_context: 0 wq_pool_attach_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex.wait_lock irq_context: 0 wq_pool_attach_mutex.wait_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#10 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock 
remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)events (work_completion)(&barr->work) &x->wait#10 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 &ht->mutex &meta->lock irq_context: 0 cb_lock &devlink->lock_key#3 &ht->mutex kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#3 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 batched_entropy_u8.lock irq_context: 0 cb_lock &devlink->lock_key#3 kfence_freelist_lock irq_context: 0 cb_lock &devlink->lock_key#3 &meta->lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &sb->s_type->i_mutex_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events_long &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex cpu_hotplug_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf 
(work_completion)(&(&ifa->dad_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &____s->seqcount#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mm_percpu_wq (work_completion)(&(({ do { const void *__vpp_verify = (typeof((&vmstat_work) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((&vmstat_work))) *)((&vmstat_work)))); (typeof((typeof(*((&vmstat_work))) *)((&vmstat_work)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); }))->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&pool->idle_timer) rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) wq_pool_attach_mutex &pool->lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-crypt-wg1 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 cb_lock &devlink->lock_key#3 &ht->mutex &rq->__lock irq_context: 0 cb_lock 
&devlink->lock_key#3 &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 &pcp->lock &zone->lock irq_context: 0 cb_lock &devlink->lock_key#3 quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu quarantine_lock irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu &c->lock irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 cb_lock &devlink->lock_key#3 &sb->s_type->i_mutex_key#3 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &pl->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 rcu_read_lock &xa->xa_lock#8 &pl->lock key#12 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &type->s_umount_key#40 &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &ret->b_state_lock bit_wait_table + i &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv#2 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) fs_reclaim irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 
(work_completion)(&srv->awork) k-sk_lock-AF_TIPC fs_reclaim irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &dir->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC batched_entropy_u32.lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &base->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 fs_reclaim irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC &base->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)tipc_rcv#2 
(work_completion)(&srv->awork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &c->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &xa->xa_lock#8 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &fsnotify_mark_srcu irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-slock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &rq->__lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC pool_lock#2 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &c->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &____s->seqcount#2 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 &____s->seqcount irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC k-sk_lock-AF_TIPC/1 pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv#2 
(work_completion)(&con->rwork) k-sk_lock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC k-slock-AF_TIPC &list->lock#23 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC slock-AF_TIPC pool_lock#2 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC quarantine_lock irq_context: 0 sk_lock-AF_TIPC rcu_node_0 irq_context: softirq &(&wb->bw_dwork)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex dev_pm_qos_sysfs_mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_TIPC &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_TIPC &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &cfs_rq->removed.lock irq_context: 0 cb_lock &devlink->lock_key#3 remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC rcu_node_0 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock ptlock_ptr(page)#2 lock#5 irq_context: 0 sk_lock-AF_TIPC remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 lock#5 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#5 &lruvec->lru_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem ptlock_ptr(page)#2 lock#5 &lruvec->lru_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC &list->lock#23 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-clock-AF_TIPC &con->sub_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &srv->idr_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &base->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC k-clock-AF_TIPC irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &obj_hash[i].lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &sb->s_type->i_lock_key#8 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &xa->xa_lock#8 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &fsnotify_mark_srcu irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &con->outqueue_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &srv->idr_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET &asoc->wait &p->pi_lock irq_context: 0 sk_lock-AF_INET &asoc->wait &p->pi_lock &rq->__lock 
irq_context: 0 sk_lock-AF_INET &asoc->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 key#26 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &srv->idr_lock &c->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &n->list_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &n->list_lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &n->list_lock &c->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &____s->seqcount#2 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC &____s->seqcount irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &meta->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC kfence_freelist_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &____s->seqcount#2 irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &____s->seqcount irq_context: 0 rtnl_mutex &sch->q.lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_read_lock &est->seq irq_context: softirq (&est->timer) irq_context: softirq (&est->timer) &est->seq irq_context: softirq (&est->timer) &obj_hash[i].lock irq_context: softirq (&est->timer) &base->lock irq_context: softirq (&est->timer) &base->lock &obj_hash[i].lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_persistent_keepalive) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&mp->timer) &br->multicast_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount#2 irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 cb_lock &devlink->lock_key#3 rtnl_mutex &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&srv->awork) k-sk_lock-AF_TIPC &c->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &rq->__lock irq_context: 0 rtnl_mutex &tb->tb6_lock 
(console_sem).lock irq_context: 0 rtnl_mutex &tb->tb6_lock console_lock console_srcu console_owner_lock irq_context: 0 rtnl_mutex &tb->tb6_lock console_lock console_srcu console_owner irq_context: 0 rtnl_mutex &tb->tb6_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rtnl_mutex &tb->tb6_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) k-sk_lock-AF_TIPC &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &____s->seqcount irq_context: 0 rtnl_mutex sk_lock-AF_INET6 rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &c->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock pool_lock#2 irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock _xmit_ETHER irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &base->lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock krc.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 krc.lock irq_context: 0 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock _xmit_ETHER irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex sk_lock-AF_INET6 krc.lock 
irq_context: 0 &sb->s_type->i_mutex_key#10 rtnl_mutex slock-AF_INET6 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 sk_lock-AF_TIPC k-slock-AF_TIPC k-clock-AF_TIPC rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 &iint->mutex &rcu_state.expedited_wq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &mm->mmap_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)tipc_rcv#2 (work_completion)(&con->rwork) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &block->cb_lock &tp->lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex fs_reclaim irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex.wait_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 fs_reclaim pgd_lock irq_context: 0 fs_reclaim stock_lock irq_context: 0 fs_reclaim key irq_context: 0 fs_reclaim pcpu_lock irq_context: 0 fs_reclaim percpu_counters_lock irq_context: 0 fs_reclaim pcpu_lock stock_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex irq_context: 0 nlk_cb_mutex-GENERIC remove_cache_srcu irq_context: 0 nlk_cb_mutex-GENERIC remove_cache_srcu quarantine_lock irq_context: 0 nlk_cb_mutex-GENERIC remove_cache_srcu &c->lock irq_context: 0 nlk_cb_mutex-GENERIC remove_cache_srcu &n->list_lock irq_context: 0 nlk_cb_mutex-GENERIC remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 nlk_cb_mutex-GENERIC remove_cache_srcu &obj_hash[i].lock irq_context: 0 
nlk_cb_mutex-GENERIC remove_cache_srcu &rq->__lock irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_ALG rcu_read_lock rcu_read_lock &rq->__lock irq_context: softirq &(&hctx->run_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &cfs_rq->removed.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock kfence_freelist_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex &c->lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 sk_lock-AF_ALG &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock sb_pagefaults mapping.invalidate_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &ct->lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &ct->lock (console_sem).lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &ct->lock console_lock console_srcu console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &ct->lock console_lock console_srcu console_owner irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &ct->lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink rcu_read_lock &ct->lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &pipe->mutex/1 nfnl_subsys_ctnetlink &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_ALG 
&mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex uevent_sock_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh &tbl->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_ALG &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key krc.lock irq_context: 0 rtnl_mutex &pmc->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) rcu_node_0 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7/1 cgroup_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex rcu_node_0 irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ei->i_data_sem &mapping->private_lock irq_context: 0 &xs->mutex fs_reclaim &rq->__lock irq_context: 0 &xs->mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem batched_entropy_u32.lock irq_context: 0 cb_lock pcpu_alloc_mutex irq_context: 0 cb_lock pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock crngs.lock irq_context: 0 cb_lock ovs_mutex fs_reclaim irq_context: 0 cb_lock ovs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
cb_lock ovs_mutex pcpu_alloc_mutex irq_context: 0 cb_lock ovs_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock ovs_mutex stock_lock irq_context: 0 cb_lock ovs_mutex stack_depot_init_mutex irq_context: 0 cb_lock ovs_mutex crngs.lock irq_context: 0 cb_lock ovs_mutex &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex fs_reclaim irq_context: 0 cb_lock ovs_mutex rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock ovs_mutex rtnl_mutex &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 cb_lock ovs_mutex rtnl_mutex net_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &tn->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &x->wait#9 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &k->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex gdp_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock kernfs_idr_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex bus_type_sem irq_context: 0 cb_lock ovs_mutex rtnl_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock kernfs_idr_lock pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &____s->seqcount irq_context: 0 cb_lock ovs_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 cb_lock ovs_mutex rtnl_mutex &dev->power.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex dpm_list_mtx irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex subsys mutex#17 irq_context: 0 cb_lock ovs_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &dir->lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex dev_base_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex input_pool.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &pool->lock/1 &p->pi_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex &rq->__lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock ovs_mutex rtnl_mutex batched_entropy_u32.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &tbl->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex stock_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex sysctl_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex nl_table_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex nl_table_wait.lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex failover_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex pcpu_alloc_mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex proc_subdir_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &n->list_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &n->list_lock &c->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock _xmit_ETHER irq_context: 0 cb_lock ovs_mutex rtnl_mutex &idev->mc_lock _xmit_ETHER pool_lock#2 irq_context: 0 cb_lock ovs_mutex rtnl_mutex &pnettable->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 cb_lock ovs_mutex rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex (console_sem).lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 cb_lock ovs_mutex rtnl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 cb_lock ovs_mutex rtnl_mutex _xmit_ETHER irq_context: 0 cb_lock nl_table_lock irq_context: 0 cb_lock nl_table_wait.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem rcu_node_0 irq_context: 0 &ei->i_data_sem &c->lock irq_context: 0 hashlimit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 hashlimit_mutex &pcp->lock &zone->lock irq_context: 0 hashlimit_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_LLC &mm->mmap_lock irq_context: 0 hashlimit_mutex &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem remove_cache_srcu irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem remove_cache_srcu &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) 
&sbi->s_writepages_rwsem remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_prealloc_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->private_lock rcu_read_lock &memcg->move_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &wb->list_lock irq_context: 0 &mm->mmap_lock sb_pagefaults &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex fs_reclaim irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_wait_commit irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_wait_done_commit irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem 
jbd2_handle lock#4 &lruvec->lru_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &bgl->locks[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem rcu_read_lock key#10 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock rcu_read_lock &xa->xa_lock#8 irq_context: 0 &mm->mmap_lock &sb->s_type->i_lock_key#22 irq_context: 0 
&mm->mmap_lock &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 &bdi->wb_waitq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#8 &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 &sighand->siglock &____s->seqcount#2 irq_context: 0 &sighand->siglock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) kfence_freelist_lock irq_context: 0 &mm->mmap_lock sb_pagefaults mapping.invalidate_lock &type->s_umount_key#32 rcu_read_lock &wb->work_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 &mm->mmap_lock rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 cgroup_threadgroup_rwsem pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock tk_core.seq.seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem &journal->j_state_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#8 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &xa->xa_lock#8 pool_lock#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &journal->j_revoke_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 
&f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount#2 irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock &____s->seqcount irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex key#3 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sem->wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_es_lock key#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock &xa->xa_lock#8 key#10 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem key#3 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem key#14 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &wb->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &wb->list_lock &sb->s_type->i_lock_key#22 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &xa->xa_lock#8 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &xa->xa_lock#8 pool_lock#2 irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &____s->seqcount irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem stock_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &xa->xa_lock#8 irq_context: 0 &f->f_pos_lock sb_writers#4 
&sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem lock#4 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem bit_wait_table + i irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &sbi->s_orphan_lock &ret->b_state_lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &lruvec->lru_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 (wq_completion)writeback (work_completion)(&(&wb->dwork)->work) &bdi->wb_waitq &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle &ei->i_data_sem &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &pipe->mutex/1 rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex net_rwsem &rq->__lock irq_context: 0 &pipe->mutex/1 rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rtnl_mutex &n->list_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 &pipe->mutex/1 rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 &pipe->mutex/1 rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 rtnl_mutex _xmit_ETHER irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 &pipe->mutex/1 rtnl_mutex rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 &pipe->mutex/1 rtnl_mutex rlock-AF_NETLINK irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 rcu_read_lock pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex lock#4 &obj_hash[i].lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_NONE irq_context: 0 &ctx->tx_lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &rq->__lock irq_context: 0 
&ctx->tx_lock sk_lock-AF_INET6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 slock-AF_INET6 irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 fs_reclaim irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 pool_lock#2 irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &____s->seqcount irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &c->lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &obj_hash[i].lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &base->lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &ei->socket.wq.wait irq_context: 0 &ctx->tx_lock slock-AF_INET6 irq_context: 0 &ctx->tx_lock &rq->__lock irq_context: 0 &ctx->tx_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 slock-AF_INET6 &sk->sk_lock.wq irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &n->list_lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &n->list_lock &c->lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 &____s->seqcount#2 irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &ctx->tx_lock sk_lock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: 0 &xs->mutex &mm->mmap_lock &sem->wait_lock irq_context: 0 rtnl_mutex &xs->mutex &lock->wait_lock irq_context: 0 rtnl_mutex &xs->mutex &rq->__lock irq_context: 0 rtnl_mutex &xs->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xs->mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex _xmit_NONE &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &c->lock irq_context: 0 &xs->mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 &xs->mutex purge_vmap_area_lock irq_context: 0 &xs->mutex rcu_read_lock pool_lock#2 irq_context: 0 &xs->mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &xs->mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &xs->mutex rcu_read_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &xs->mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&hwstats->traffic_dw)->work) fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock 
rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET6 irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex k-clock-AF_INET6 irq_context: 0 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC rcu_node_0 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rcu_state.expedited_wq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock nl_table_lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock 
fill_pool_map-wait-type-override pool_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &journal->j_barrier &journal->j_checkpoint_mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &ctx->tx_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &n->list_lock irq_context: 0 &iint->mutex mapping.invalidate_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rlock-AF_PACKET irq_context: 0 sk_lock-AF_INET6 &dccp_hashinfo.bhash[i].lock &dccp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 pernet_ops_rwsem ovs_mutex nf_ct_proto_mutex nf_hook_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &iint->mutex mapping.invalidate_lock &xa->xa_lock#8 &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex subsys mutex#17 &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock jbd2_handle rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 remove_cache_srcu rcu_node_0 irq_context: 0 pernet_ops_rwsem subsys mutex#84 &rq->__lock irq_context: 0 pernet_ops_rwsem subsys mutex#84 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override 
&____s->seqcount irq_context: 0 pernet_ops_rwsem &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: softirq (&icsk->icsk_retransmit_timer) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock rcu_read_lock_bh &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" 
: "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex pool_lock#2 irq_context: 0 &type->s_umount_key#23/1 fs_reclaim &rq->__lock irq_context: 0 &type->s_umount_key#23/1 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_read_lock &tb->tb6_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fastopen_seqlock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock fastopen_seqlock fastopen_seqlock.seqcount irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_ALG &mm->mmap_lock &sem->wait_lock irq_context: 0 sk_lock-AF_ALG &sem->wait_lock irq_context: 0 sk_lock-AF_ALG &p->pi_lock irq_context: 0 sk_lock-AF_ALG &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_ALG &p->pi_lock &rq->__lock 
irq_context: 0 sk_lock-AF_ALG &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &hashinfo->ehash_locks[i] irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &queue->rskq_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh noop_qdisc.q.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 elock-AF_INET6 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock kfence_freelist_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &meta->lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex uevent_sock_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &root->kernfs_rwsem pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex 
fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex nf_hook_mutex remove_cache_srcu pool_lock#2 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex rcu_read_lock &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 pernet_ops_rwsem rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pernet_ops_rwsem rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &pcp->lock &zone->lock irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem k-sk_lock-AF_RXRPC fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &c->lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)events free_ipc_work rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
rlock-AF_INET6 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu quarantine_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu &c->lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu &n->list_lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu &obj_hash[i].lock irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 pernet_ops_rwsem k-sk_lock-AF_RXRPC remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 (work_completion)(&hdev->power_on) irq_context: 0 &hdev->req_lock (work_completion)(&(&hdev->interleave_scan)->work) irq_context: 0 &hdev->req_lock (work_completion)(&(&hdev->interleave_scan)->work) &rq->__lock irq_context: 0 &hdev->req_lock hci_dev_list_lock irq_context: 0 &hdev->req_lock (work_completion)(&hdev->tx_work) irq_context: 0 &hdev->req_lock (work_completion)(&hdev->rx_work) irq_context: 0 &hdev->req_lock (work_completion)(&(&hdev->rpa_expired)->work) irq_context: 0 &hdev->req_lock &hdev->lock fs_reclaim irq_context: 0 &hdev->req_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &hdev->req_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 &hdev->req_lock &hdev->lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock stock_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock &mm->page_table_lock irq_context: 0 &hdev->req_lock &hdev->lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock ptlock_ptr(page) irq_context: 0 &hdev->req_lock &hdev->lock tk_core.seq.seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 &hdev->req_lock &hdev->lock hci_sk_list.lock irq_context: 0 br_ioctl_mutex rtnl_mutex stock_lock irq_context: 0 &hdev->req_lock tk_core.seq.seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex stack_depot_init_mutex irq_context: 0 &hdev->req_lock hci_sk_list.lock irq_context: 0 br_ioctl_mutex rtnl_mutex crngs.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &obj_hash[i].lock pool_lock irq_context: 0 &hdev->req_lock (work_completion)(&hdev->cmd_work) irq_context: 0 br_ioctl_mutex rtnl_mutex &____s->seqcount#2 irq_context: 0 &hdev->req_lock (work_completion)(&hdev->cmd_work) &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex krc.lock irq_context: 0 &hdev->req_lock (work_completion)(&(&hdev->cmd_timer)->work) irq_context: 0 rcu_read_lock &f->f_owner.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 batched_entropy_u8.lock 
crngs.lock irq_context: 0 sk_lock-AF_INET (console_sem).lock irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_INET console_lock console_srcu console_owner console_owner_lock irq_context: 0 &root->kernfs_rwsem pgd_lock irq_context: 0 &root->kernfs_rwsem stock_lock irq_context: 0 &root->kernfs_rwsem rcu_read_lock pool_lock#2 irq_context: 0 &root->kernfs_rwsem key irq_context: 0 &root->kernfs_rwsem pcpu_lock irq_context: 0 &root->kernfs_rwsem percpu_counters_lock irq_context: 0 &root->kernfs_rwsem pcpu_lock stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &sem->wait_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &sem->wait_lock 
irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 
(wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 nf_sockopt_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &xt[i].mutex &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &n->lock pool_lock#2 irq_context: 0 rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.gp_wq 
&p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#12 irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 &f->f_pos_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock pcpu_lock irq_context: 0 &vma->vm_lock->lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &mapping->i_mmap_rwsem &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh 
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &meta->lock irq_context: 0 &xt[i].mutex purge_vmap_area_lock quarantine_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &pfk->dump_lock &net->xfrm.xfrm_state_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_UNIX &mm->mmap_lock irq_context: 0 sk_lock-AF_UNIX fs_reclaim irq_context: 0 sk_lock-AF_UNIX fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_UNIX fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_UNIX fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_UNIX free_vmap_area_lock irq_context: 0 sk_lock-AF_UNIX vmap_area_lock irq_context: 0 sk_lock-AF_UNIX &____s->seqcount irq_context: 0 sk_lock-AF_UNIX stock_lock irq_context: 0 sk_lock-AF_UNIX pcpu_alloc_mutex irq_context: 0 sk_lock-AF_UNIX pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_UNIX &obj_hash[i].lock irq_context: 0 sk_lock-AF_UNIX &c->lock irq_context: 0 sk_lock-AF_UNIX pack_mutex irq_context: 0 sk_lock-AF_UNIX batched_entropy_u32.lock irq_context: 0 sk_lock-AF_UNIX text_mutex irq_context: 0 sk_lock-AF_UNIX text_mutex ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_UNIX &fp->aux->used_maps_mutex irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 &cfs_rq->removed.lock irq_context: 0 &vma->vm_lock->lock fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 kn->active#65 fs_reclaim irq_context: 0 kn->active#65 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#65 &c->lock irq_context: 0 kn->active#65 stock_lock irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#65 &kernfs_locks->open_file_mutex[count] 
fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#65 devcgroup_mutex irq_context: 0 smc_v6_hashinfo.lock irq_context: 0 (work_completion)(&smc->connect_work) irq_context: 0 sk_lock-AF_SMC smc_v6_hashinfo.lock irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET6 irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET6 k-clock-AF_INET6 irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 elock-AF_INET6 irq_context: 0 &smc->clcsock_release_lock k-slock-AF_INET6 irq_context: 0 &smc->clcsock_release_lock pool_lock#2 irq_context: 0 &smc->clcsock_release_lock &dir->lock irq_context: 0 &smc->clcsock_release_lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock &sch->q.lock tk_core.seq.seqcount irq_context: 0 &smc->clcsock_release_lock stock_lock irq_context: 0 &smc->clcsock_release_lock &sb->s_type->i_lock_key#8 irq_context: 0 &smc->clcsock_release_lock &xa->xa_lock#8 irq_context: 0 &smc->clcsock_release_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &smc->clcsock_release_lock &fsnotify_mark_srcu irq_context: 0 sk_lock-AF_SMC clock-AF_SMC irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_TIPC quarantine_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start pgd_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start stock_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start key irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start pcpu_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start percpu_counters_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start pcpu_lock stock_lock irq_context: 0 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start 
&cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock quarantine_lock irq_context: 0 &group->mark_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock kfence_freelist_lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/2 irq_context: 0 (wq_completion)wg-kex-wg0#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &list->lock#14 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh &r->producer_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = 
(typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock_bh rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh tk_core.seq.seqcount irq_context: 0 rtnl_mutex team->team_lock_key#4 &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex team->team_lock_key#4 &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#4 &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock k-clock-AF_INET irq_context: 0 sk_lock-AF_INET &token_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET stock_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 ipvs->sync_mutex irq_context: 0 sk_lock-AF_INET &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &n->list_lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &____s->seqcount#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &macvlan_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &macvlan_netdev_addr_lock_key/2 
&vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex team->team_lock_key#4 &macvlan_netdev_addr_lock_key/2 &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#4 pcpu_alloc_mutex irq_context: 0 rtnl_mutex team->team_lock_key#4 pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 pcpu_alloc_mutex &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &macvlan_netdev_addr_lock_key/2 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &idev->mc_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &tb->tb6_lock nl_table_wait.lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 (inet6addr_validator_chain).rwsem irq_context: 0 rtnl_mutex team->team_lock_key#4 stock_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock 
&net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 pcpu_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &ifa->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 &(&bp->lock)->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 &(&bp->lock)->lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock 
k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &ndev->lock &ifa->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &pn->hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &dev->tx_global_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#4 &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 &sch->q.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock 
k-slock-AF_INET/1 slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET &base->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 __ip_vs_mutex irq_context: 0 rtnl_mutex team->team_lock_key#4 __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &macvlan_netdev_addr_lock_key/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &macvlan_netdev_addr_lock_key/2 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &tbl->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 class irq_context: 0 rtnl_mutex team->team_lock_key#4 (&tbl->proxy_timer) irq_context: 0 rtnl_mutex team->team_lock_key#4 &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &ht->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock rcu_read_lock &ht->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &(&flowtable->gc_work)->timer irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &base->lock 
irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock (wq_completion)nf_ft_offload_add irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &wq->mutex irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock (wq_completion)nf_ft_offload_del irq_context: 0 rtnl_mutex team->team_lock_key#4 flowtable_lock (wq_completion)nf_ft_offload_stats irq_context: 0 rtnl_mutex team->team_lock_key#4 &dir->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &dir->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &idev->mc_lock &macvlan_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &idev->mc_lock &macvlan_netdev_addr_lock_key/2 
&obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &idev->mc_lock &macvlan_netdev_addr_lock_key/2 pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#4 &idev->mc_lock &macvlan_netdev_addr_lock_key/2 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex team->team_lock_key#4 &idev->mc_query_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#4 (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 rtnl_mutex team->team_lock_key#4 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &k->k_lock klist_remove_lock irq_context: 0 &type->i_mutex_dir_key/1 irq_context: 0 &type->i_mutex_dir_key/1 rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key/1 pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock mount_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock rcu_read_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &obj_hash[i].lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &c->lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock mmu_notifier_invalidate_range_start irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 tk_core.seq.seqcount irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 rcu_read_lock &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_mutex_key#4 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 irq_context: 0 &type->i_mutex_dir_key/1 &sb->s_type->i_lock_key#5 &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key/1 &fsnotify_mark_srcu irq_context: 0 &type->i_mutex_dir_key/1 &s->s_inode_list_lock irq_context: 0 &type->i_mutex_dir_key/1 &sbinfo->stat_lock irq_context: 0 &type->i_mutex_dir_key/1 &xa->xa_lock#8 irq_context: 0 &type->i_mutex_dir_key/1 &dentry->d_lock irq_context: 0 rtnl_mutex subsys mutex#82 &k->k_lock klist_remove_lock irq_context: 0 rtnl_mutex gdp_mutex sysfs_symlink_target_lock irq_context: 0 rtnl_mutex gdp_mutex kernfs_idr_lock irq_context: 0 rtnl_mutex gdp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex (work_completion)(&port->bc_work) irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &ul->lock irq_context: softirq (&msk->sk.icsk_retransmit_timer) irq_context: softirq (&msk->sk.icsk_retransmit_timer) slock-AF_INET irq_context: softirq (&msk->sk.icsk_retransmit_timer) slock-AF_INET rcu_read_lock 
&pool->lock irq_context: softirq (&msk->sk.icsk_retransmit_timer) slock-AF_INET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&msk->sk.icsk_retransmit_timer) slock-AF_INET rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&msk->sk.icsk_retransmit_timer) slock-AF_INET rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq (&msk->sk.icsk_retransmit_timer) slock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&msk->sk.icsk_retransmit_timer) slock-AF_INET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET slock-AF_INET &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET slock-AF_INET &base->lock irq_context: 0 (wq_completion)events (work_completion)(&msk->work) sk_lock-AF_INET slock-AF_INET &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET &c->lock irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &hashinfo->ehash_locks[i] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 remove_cache_srcu pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &ct->lock irq_context: 0 
&sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &base->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 slock-AF_INET irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 &root->kernfs_iattr_rwsem rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#4 &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 sk_lock-AF_INET k-sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#4 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 
rtnl_mutex team->team_lock_key#4 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_delack_timer) k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &macvlan_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &macvlan_netdev_addr_lock_key/2 &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 &macvlan_netdev_addr_lock_key/2 &vlan_netdev_addr_lock_key/1 _xmit_ETHER irq_context: 0 rtnl_mutex team->team_lock_key#3 pcpu_alloc_mutex irq_context: 0 rtnl_mutex team->team_lock_key#3 pcpu_alloc_mutex pcpu_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 &idev->mc_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 (inet6addr_validator_chain).rwsem irq_context: 0 rtnl_mutex team->team_lock_key#3 stock_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &net->sctp.local_addr_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 pcpu_lock irq_context: 0 rtnl_mutex 
team->team_lock_key#3 &ifa->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 &ndev->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &ndev->lock &ifa->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &pn->hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &dev->tx_global_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &dev->tx_global_lock &qdisc_xmit_lock_key irq_context: 0 rtnl_mutex team->team_lock_key#3 &dev->tx_global_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_node_0 irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 dev->qdisc_tx_busylock ?: &qdisc_tx_busylock#2 &sch->q.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &sch->q.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 rtnl_mutex team->team_lock_key#3 __ip_vs_mutex irq_context: 0 rtnl_mutex team->team_lock_key#3 __ip_vs_mutex &ipvs->dest_trash_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &macvlan_netdev_addr_lock_key/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &macvlan_netdev_addr_lock_key/2 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tbl->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 class irq_context: 0 rtnl_mutex team->team_lock_key#3 (&tbl->proxy_timer) irq_context: 0 rtnl_mutex team->team_lock_key#3 &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &ht->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock rcu_read_lock &ht->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock 
&(&flowtable->gc_work)->timer irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &base->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock (work_completion)(&(&flowtable->gc_work)->work) irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock (wq_completion)nf_ft_offload_add irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &wq->mutex irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &wq->mutex &pool->lock/1 irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock &wq->mutex &x->wait#10 irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock (wq_completion)nf_ft_offload_del irq_context: 0 rtnl_mutex team->team_lock_key#3 flowtable_lock (wq_completion)nf_ft_offload_stats irq_context: 0 rtnl_mutex team->team_lock_key#3 &dir->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET/1 slock-AF_INET &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock rt6_exception_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock nl_table_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &tb->tb6_lock nl_table_wait.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &net->ipv6.addrconf_hash_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &net->ipv6.addrconf_hash_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &net->ipv6.addrconf_hash_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &net->ipv6.addrconf_hash_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &ndev->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &dir->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock 
krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &idev->mc_lock &macvlan_netdev_addr_lock_key/2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &idev->mc_lock &macvlan_netdev_addr_lock_key/2 &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &idev->mc_lock &macvlan_netdev_addr_lock_key/2 krc.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 rtnl_mutex team->team_lock_key#3 &idev->mc_query_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &idev->mc_query_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 (work_completion)(&(&idev->mc_report_work)->work) irq_context: 0 rtnl_mutex team->team_lock_key#3 (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex team->team_lock_key#3 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &net->ipv6.fib6_gc_lock rcu_read_lock &tb->tb6_lock &net->ipv6.fib6_walker_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &net->ipv6.fib6_gc_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 batched_entropy_u8.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex team->team_lock_key#3 kfence_freelist_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &meta->lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET k-sk_lock-AF_INET/1 k-slock-AF_INET &tcp_hashinfo.bhash[i].lock rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) k-slock-AF_INET pool_lock#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 &vlan_netdev_addr_lock_key/1 &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &vlan_netdev_addr_lock_key/1 &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &vlan_netdev_addr_lock_key/1 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tb->tb6_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tb->tb6_lock &n->list_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &tb->tb6_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 rtnl_mutex team->team_lock_key#3 &____s->seqcount#2 irq_context: 0 rtnl_mutex team->team_lock_key#3 quarantine_lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock pgd_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock key irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock pcpu_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock percpu_counters_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex team->team_lock_key#3 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_read_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex team->team_lock_key#3 rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key/1 tomoyo_ss tomoyo_policy_lock &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &xa->xa_lock#3 irq_context: 0 br_ioctl_mutex rtnl_mutex &x->wait#9 irq_context: 0 br_ioctl_mutex rtnl_mutex &k->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex gdp_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex gdp_mutex &k->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex bus_type_sem irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem irq_context: 0 br_ioctl_mutex rtnl_mutex &dev->power.lock irq_context: 0 br_ioctl_mutex rtnl_mutex dpm_list_mtx irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex nl_table_wait.lock irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 irq_context: 0 br_ioctl_mutex rtnl_mutex subsys mutex#17 &k->k_lock irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex dev_hotplug_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex dev_hotplug_mutex &dev->power.lock irq_context: 0 br_ioctl_mutex rtnl_mutex dev_base_lock irq_context: 0 br_ioctl_mutex rtnl_mutex input_pool.lock irq_context: 0 br_ioctl_mutex rtnl_mutex &tbl->lock irq_context: 0 br_ioctl_mutex rtnl_mutex sysctl_lock irq_context: 0 br_ioctl_mutex rtnl_mutex failover_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_inum_ida.xa_lock irq_context: 0 br_ioctl_mutex rtnl_mutex proc_subdir_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock irq_context: 0 
br_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key irq_context: 0 br_ioctl_mutex rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock &n->list_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &pnettable->lock irq_context: 0 br_ioctl_mutex rtnl_mutex smc_ib_devices.mutex irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_PACKET &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#66 fs_reclaim irq_context: 0 kn->active#66 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#66 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 kn->active#66 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#66 rcu_read_lock rcu_node_0 irq_context: 0 kn->active#66 rcu_read_lock &rq->__lock irq_context: 0 kn->active#66 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#66 stock_lock irq_context: 0 kn->active#66 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#66 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#66 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock &rq->__lock irq_context: 0 &f->f_pos_lock 
sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock cpuset_mutex irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock cpuset_mutex fs_reclaim irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock cpuset_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock cpuset_mutex pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#11 &of->mutex kn->active#66 cpu_hotplug_lock cpuset_mutex &obj_hash[i].lock irq_context: 0 &po->bind_lock irq_context: 0 clock-AF_PACKET irq_context: 0 elock-AF_PACKET irq_context: 0 bpf_devs_lock irq_context: 0 bpf_devs_lock fs_reclaim irq_context: 0 bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 bpf_devs_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bpf_devs_lock pool_lock#2 irq_context: 0 bpf_devs_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex bpf_devs_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_PACKET rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC &sk->sk_lock.wq irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq &p->pi_lock irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq &p->pi_lock &rq->__lock irq_context: 0 slock-AF_TIPC &sk->sk_lock.wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 &smc->clcsock_release_lock fs_reclaim irq_context: 0 &smc->clcsock_release_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 &smc->clcsock_release_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &smc->clcsock_release_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_AX25 ax25_dev_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &pipe->mutex/1 sk_lock-AF_INET6 &ei->socket.wq.wait irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: softirq 
(&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock irq_context: 0 tasklist_lock &sighand->siglock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 &(&bp->lock)->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 &(&bp->lock)->lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &f->f_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &new->fa_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &new->fa_lock &f->f_owner.lock irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock rlock-AF_PACKET irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &f->f_lock fasync_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &dir->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &____s->seqcount irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock stock_lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 pool_lock#2 irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 &meta->lock irq_context: softirq (&peer->timer_retransmit_handshake) 
&list->lock#14 kfence_freelist_lock irq_context: 0 sb_writers#8 kn->active#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock quarantine_lock irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &c->lock irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 rcu_state.exp_mutex pool_lock#2 irq_context: softirq (&n->timer) rcu_read_lock &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &base->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock stock_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &ul->lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock &meta->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock &n->lock kfence_freelist_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &root->kernfs_iattr_rwsem pgd_lock irq_context: 0 &root->kernfs_iattr_rwsem stock_lock irq_context: 0 &root->kernfs_iattr_rwsem key irq_context: 0 &root->kernfs_iattr_rwsem pcpu_lock irq_context: 0 &root->kernfs_iattr_rwsem percpu_counters_lock irq_context: 0 &root->kernfs_iattr_rwsem pcpu_lock stock_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do 
{ const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 &u->iolock &mm->mmap_lock stock_lock irq_context: 0 rlock-AF_CAN irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_IEEE802154 (console_sem).lock irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_IEEE802154 console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_IEEE802154 &rq->__lock irq_context: 0 sk_lock-AF_IEEE802154 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &service->lock irq_context: 0 rcu_read_lock rcu_read_lock (console_sem).lock irq_context: 0 rcu_read_lock rcu_read_lock console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock console_lock console_srcu console_owner irq_context: 0 rcu_read_lock rcu_read_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock rcu_read_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &sig->cred_guard_mutex &ei->xattr_sem rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ht->lock irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_TIPC irq_context: 0 cb_lock genl_mutex k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 cb_lock genl_mutex k-slock-AF_TIPC irq_context: 0 &u->iolock &u->lock rlock-AF_UNIX irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] batched_entropy_u8.lock irq_context: 0 sk_lock-AF_INET slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock quarantine_lock irq_context: 0 nf_sockopt_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq 
(&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#9 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET6 &list->lock#5 irq_context: 0 cb_lock genl_mutex calipso_doi_list_lock irq_context: 0 pcpu_alloc_mutex remove_cache_srcu irq_context: 0 pcpu_alloc_mutex remove_cache_srcu &rq->__lock irq_context: 0 pcpu_alloc_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex rcu_read_lock &obj_hash[i].lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &r->producer_lock#3 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &meta->lock irq_context: 0 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &ei->i_es_lock key#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#5 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 &audit_cmd_mutex.lock &c->lock irq_context: 0 kn->active#5 &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu pool_lock#2 irq_context: 0 cb_lock &rdev->wiphy.mtx rcu_node_0 irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock ptlock_ptr(page)#2 rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 
(wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pgd_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 key irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pcpu_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 percpu_counters_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 pcpu_lock stock_lock irq_context: 0 &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 &type->i_mutex_dir_key#3 rcu_read_lock &dentry->d_lock irq_context: 0 tracepoints_mutex reg_lock irq_context: 0 tracepoints_mutex reg_lock fs_reclaim irq_context: 0 tracepoints_mutex reg_lock fs_reclaim &rq->__lock irq_context: 0 tracepoints_mutex reg_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex reg_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 tracepoints_mutex reg_lock &c->lock irq_context: 0 tracepoints_mutex reg_lock pool_lock#2 irq_context: 0 &mm->mmap_lock lock#10 irq_context: 0 &mm->mmap_lock lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 &mm->mmap_lock lock#10 irq_context: 0 &mm->mmap_lock lock#10 rcu_read_lock kernfs_rename_lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_node_0 irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &rq->__lock irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock lock#10 irq_context: 0 dup_mmap_sem &mm->mmap_lock lock#10 irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 lock#10 irq_context: 0 tracepoints_mutex reg_lock rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 tracepoints_mutex reg_lock &obj_hash[i].lock irq_context: 0 link_idr_lock &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO sco_sk_list.lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_SCO &mm->mmap_lock irq_context: 0 rtnl_mutex rcu_read_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh &meta->lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&bat_priv->dat.work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#11 &type->i_mutex_dir_key#7 &dentry->d_lock &wq#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&net->ipv6.addr_chk_work)->work) rtnl_mutex rcu_read_lock_bh &ifa->lock irq_context: 0 sk_lock-AF_TIPC slock-AF_TIPC rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&bat_priv->nc.work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 rcu_node_0 irq_context: 0 &xt[i].mutex rcu_read_lock pgd_lock irq_context: 0 &xt[i].mutex rcu_read_lock stock_lock irq_context: 0 &xt[i].mutex rcu_read_lock key irq_context: 0 &xt[i].mutex rcu_read_lock pcpu_lock irq_context: 0 &xt[i].mutex rcu_read_lock percpu_counters_lock irq_context: 0 &xt[i].mutex rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss batched_entropy_u8.lock crngs.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 quarantine_lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 rcu_node_0 irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_PACKET rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &ping_table.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock 
k-slock-AF_INET6 rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rlock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 elock-AF_INET6 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &ndev->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock 
pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &c->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock stock_lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &ul->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock 
k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&work->work)#3 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &group->mark_mutex &fsnotify_mark_srcu &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock pcpu_lock stock_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 rtnl_mutex stack_depot_init_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 
&____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &ct->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 krc.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &base->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &base->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &list->lock#18 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock 
&rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &p->lock &of->mutex kn->active#5 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 cb_lock &fw_cache.lock irq_context: 0 cb_lock &fw_cache.lock pool_lock#2 irq_context: 0 cb_lock tk_core.seq.seqcount irq_context: 0 cb_lock async_lock irq_context: 0 cb_lock init_task.alloc_lock irq_context: 0 cb_lock init_task.alloc_lock init_fs.lock irq_context: 0 cb_lock init_task.alloc_lock init_fs.lock &dentry->d_lock irq_context: 0 cb_lock rcu_read_lock &____s->seqcount#4 irq_context: 0 cb_lock rcu_read_lock &dentry->d_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 fs_reclaim irq_context: 0 cb_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &type->i_mutex_dir_key#3 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 stock_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &c->lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 pool_lock#2 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &dentry->d_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_es_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &mapping->private_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 inode_hash_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &obj_hash[i].lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 inode_hash_lock &sb->s_type->i_lock_key#22 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 inode_hash_lock &s->s_inode_list_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &journal->j_state_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &sb->s_type->i_lock_key#22 &dentry->d_lock &wq irq_context: 0 cb_lock &sb->s_type->i_lock_key#22 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem mmu_notifier_invalidate_range_start irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem pool_lock#2 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &rq->__lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock pool_lock#2 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &sbi->s_es_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &obj_hash[i].lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &ei->i_data_sem &ei->i_es_lock &obj_hash[i].lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &____s->seqcount#2 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 batched_entropy_u32.lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &____s->seqcount irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &xa->xa_lock#8 irq_context: 0 cb_lock 
&type->i_mutex_dir_key#3 lock#4 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 tk_core.seq.seqcount irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &dd->lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &rq->__lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 sctp_assocs_id_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 sctp_assocs_id_lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &asoc->wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &obj_hash[i].lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &dentry->d_lock &wq irq_context: 0 cb_lock &dentry->d_lock irq_context: 0 cb_lock &dentry->d_lock &lru->node[i].lock irq_context: 0 cb_lock &type->i_mutex_dir_key#3 rename_lock.seqcount irq_context: 0 cb_lock &type->i_mutex_dir_key#3 &dentry->d_lock &wq#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 cb_lock umhelper_sem irq_context: 0 cb_lock umhelper_sem usermodehelper_disabled_waitq.lock irq_context: 0 cb_lock umhelper_sem fs_reclaim irq_context: 0 cb_lock umhelper_sem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock umhelper_sem pool_lock#2 irq_context: 0 cb_lock umhelper_sem &x->wait#9 irq_context: 0 cb_lock umhelper_sem &obj_hash[i].lock irq_context: 0 cb_lock umhelper_sem &k->list_lock irq_context: 0 cb_lock umhelper_sem gdp_mutex irq_context: 0 cb_lock umhelper_sem gdp_mutex &k->list_lock irq_context: 0 cb_lock umhelper_sem gdp_mutex fs_reclaim irq_context: 0 cb_lock umhelper_sem gdp_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock umhelper_sem gdp_mutex &c->lock irq_context: 0 cb_lock umhelper_sem gdp_mutex &rq->__lock irq_context: 0 cb_lock umhelper_sem gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock umhelper_sem gdp_mutex pool_lock#2 irq_context: 0 cb_lock umhelper_sem gdp_mutex lock 
irq_context: 0 cb_lock umhelper_sem gdp_mutex lock kernfs_idr_lock irq_context: 0 cb_lock umhelper_sem gdp_mutex &root->kernfs_rwsem irq_context: 0 cb_lock umhelper_sem gdp_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock umhelper_sem lock irq_context: 0 cb_lock umhelper_sem lock kernfs_idr_lock irq_context: 0 cb_lock umhelper_sem &root->kernfs_rwsem irq_context: 0 cb_lock umhelper_sem &root->kernfs_rwsem &root->kernfs_iattr_rwsem irq_context: 0 cb_lock umhelper_sem bus_type_sem irq_context: 0 cb_lock umhelper_sem sysfs_symlink_target_lock irq_context: 0 cb_lock umhelper_sem &root->kernfs_rwsem irq_context: 0 cb_lock umhelper_sem &c->lock irq_context: 0 cb_lock umhelper_sem &____s->seqcount#2 irq_context: 0 cb_lock umhelper_sem &____s->seqcount irq_context: 0 cb_lock umhelper_sem &dev->power.lock irq_context: 0 cb_lock umhelper_sem dpm_list_mtx irq_context: 0 cb_lock umhelper_sem &k->k_lock irq_context: 0 cb_lock umhelper_sem subsys mutex#80 irq_context: 0 cb_lock umhelper_sem subsys mutex#80 &rq->__lock irq_context: 0 cb_lock umhelper_sem subsys mutex#80 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock umhelper_sem subsys mutex#80 &k->k_lock irq_context: 0 cb_lock umhelper_sem fw_lock irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex fs_reclaim irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex pool_lock#2 irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex nl_table_lock irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex nl_table_wait.lock irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex &obj_hash[i].lock irq_context: 0 cb_lock umhelper_sem &x->wait#23 irq_context: 0 cb_lock umhelper_sem &base->lock irq_context: 0 cb_lock umhelper_sem &base->lock &obj_hash[i].lock irq_context: 0 cb_lock umhelper_sem &rq->__lock irq_context: 0 cb_lock umhelper_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock umhelper_sem &cfs_rq->removed.lock irq_context: 0 cb_lock umhelper_sem (&timer.timer) irq_context: 0 cb_lock umhelper_sem fw_lock &x->wait#23 irq_context: 0 cb_lock umhelper_sem dev_pm_qos_sysfs_mtx irq_context: 0 cb_lock umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock umhelper_sem dev_pm_qos_sysfs_mtx &root->kernfs_rwsem irq_context: 0 cb_lock umhelper_sem dev_pm_qos_sysfs_mtx dev_pm_qos_mtx irq_context: 0 cb_lock umhelper_sem &root->kernfs_rwsem kernfs_idr_lock irq_context: 0 cb_lock umhelper_sem &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock umhelper_sem &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock umhelper_sem kernfs_idr_lock irq_context: 0 cb_lock umhelper_sem &k->k_lock klist_remove_lock irq_context: 0 cb_lock umhelper_sem subsys mutex#80 &k->k_lock klist_remove_lock irq_context: 0 cb_lock umhelper_sem deferred_probe_mutex irq_context: 0 cb_lock umhelper_sem device_links_lock irq_context: 0 cb_lock umhelper_sem mmu_notifier_invalidate_range_start irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex mmu_notifier_invalidate_range_start irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex &c->lock irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex 
rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock irq_context: 0 cb_lock umhelper_sem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock umhelper_sem gdp_mutex sysfs_symlink_target_lock irq_context: 0 cb_lock umhelper_sem gdp_mutex &obj_hash[i].lock irq_context: 0 cb_lock fw_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &ht->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex rcu_read_lock &ht->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex k-sk_lock-AF_TIPC irq_context: 0 cb_lock genl_mutex rtnl_mutex k-sk_lock-AF_TIPC k-slock-AF_TIPC irq_context: 0 cb_lock genl_mutex rtnl_mutex k-slock-AF_TIPC irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock &service->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock &service->lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock &nt->cluster_scope_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock &____s->seqcount irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock rcu_read_lock pool_lock#2 irq_context: 0 cb_lock genl_mutex rtnl_mutex &tn->nametbl_lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &tipc_net(net)->bclock irq_context: 0 &vma->vm_lock->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &nsim_trap_data->trap_lock batched_entropy_u8.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 quarantine_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&map->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 ppp_mutex rtnl_mutex quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex pcpu_lock irq_context: 0 ppp_mutex &obj_hash[i].lock irq_context: 0 ppp_mutex krc.lock irq_context: 0 ppp_mutex &dir->lock#2 
irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &cfs_rq->removed.lock irq_context: 0 &tsk->futex_exit_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &f->f_lock fasync_lock irq_context: 0 ppp_mutex &____s->seqcount#2 irq_context: 0 ppp_mutex &____s->seqcount irq_context: 0 (wq_completion)netns net_cleanup_work pernet_ops_rwsem &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 &root->kernfs_rwsem pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 sb_writers#8 tomoyo_ss remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sbi->s_writepages_rwsem rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rlock-AF_KCM irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_TIPC &mm->mmap_lock ptlock_ptr(page)#2 lock#4 &lruvec->lru_lock irq_context: 0 sk_lock-AF_TIPC &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_KCM rcu_read_lock &ei->socket.wq.wait irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss remove_cache_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) &app->lock init_task.mems_allowed_seq.seqcount irq_context: 0 sk_lock-AF_INET6/1 irq_context: 0 sk_lock-AF_INET6/1 slock-AF_INET6 irq_context: 0 sk_lock-AF_INET6/1 rlock-AF_INET6 irq_context: 0 sk_lock-AF_INET6/1 &list->lock#17 irq_context: 0 &net->sctp.addr_wq_lock irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 &sctp_ep_hashtable[i].lock irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 &obj_hash[i].lock irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 pool_lock#2 irq_context: 0 &net->sctp.addr_wq_lock slock-AF_INET6/1 clock-AF_INET6 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex rcu_read_lock pool_lock#2 irq_context: 0 &type->i_mutex_dir_key#6 &n->list_lock irq_context: 0 &type->i_mutex_dir_key#6 &n->list_lock &c->lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock 
&cfs_rq->removed.lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 kn->active#5 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 remove_cache_srcu &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &psock->link_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &psock->link_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock clock-AF_INET irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &psock->ingress_lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock &htab->buckets[i].lock krc.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &h->lhash2[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &queue->rskq_lock irq_context: 0 (wq_completion)events (work_completion)(&(&psock->rwork)->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) kfence_freelist_lock irq_context: 0 rlock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF &this->info_list_lock irq_context: 0 sk_lock-AF_CAIF (console_sem).lock irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_node_0 irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 
sk_lock-AF_CAIF rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAIF &ei->socket.wq.wait irq_context: 0 sk_lock-AF_CAIF &rq->__lock irq_context: 0 sk_lock-AF_CAIF &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAIF clock-AF_CAIF irq_context: 0 elock-AF_CAIF irq_context: 0 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#4 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 &head->lock irq_context: 0 fanout_mutex remove_cache_srcu irq_context: 0 fanout_mutex remove_cache_srcu quarantine_lock irq_context: 0 fanout_mutex remove_cache_srcu &c->lock irq_context: 0 fanout_mutex remove_cache_srcu &n->list_lock irq_context: 0 fanout_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 fanout_mutex remove_cache_srcu pool_lock#2 irq_context: 0 fanout_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 fanout_mutex remove_cache_srcu &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &(&bp->lock)->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &base->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock 
&n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 &base->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 (console_sem).lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 console_lock console_srcu console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 console_lock console_srcu console_owner irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 console_lock console_srcu console_owner &port_lock_key irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 (console_sem).lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 console_lock console_srcu console_owner_lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 console_lock console_srcu console_owner irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 console_lock console_srcu console_owner &port_lock_key irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 console_lock console_srcu console_owner console_owner_lock irq_context: 0 kn->active#5 fs_reclaim &cfs_rq->removed.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &tipc_net(net)->bclock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock &q->lock#2 &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 
(wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 resource_mutex &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 resource_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fanout_mutex &____s->seqcount#2 irq_context: 0 fanout_mutex &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &zone->lock irq_context: softirq rcu_read_lock rcu_read_lock &q->lock#2 &____s->seqcount irq_context: 0 rtnl_mutex &n->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &n->lock &(&n->hh.hh_lock)->lock irq_context: 0 rtnl_mutex &n->lock &(&n->hh.hh_lock)->lock &____s->seqcount#10 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 pcpu_lock stock_lock irq_context: 0 sb_writers#5 &fsnotify_mark_srcu &rq->__lock irq_context: 0 sb_writers#5 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock rename_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 
batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 kfence_freelist_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &ndev->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_owner.lock irq_context: 0 &f->f_lock fasync_lock &new->fa_lock irq_context: 0 &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 &f->f_lock fasync_lock pool_lock#2 irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 remove_cache_srcu pool_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex cpu_hotplug_lock static_call_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 file_rwsem rcu_node_0 irq_context: 0 rtnl_mutex bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 clock-AF_NETLINK irq_context: 0 wlock-AF_NETLINK irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 remove_cache_srcu pool_lock#2 irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &n->list_lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &rq->__lock irq_context: 0 rtnl_mutex &block->cb_lock flow_indr_block_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem 
&root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex net_rwsem &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem rcu_read_lock pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &sem->wait_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex &rq->__lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ipset ip_set_ref_lock irq_context: 0 sk_lock-AF_PHONET &mm->mmap_lock irq_context: 0 sk_lock-AF_PHONET fs_reclaim irq_context: 0 sk_lock-AF_PHONET fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_PHONET free_vmap_area_lock irq_context: 0 sk_lock-AF_PHONET vmap_area_lock irq_context: 0 sk_lock-AF_PHONET &____s->seqcount irq_context: 0 sk_lock-AF_PHONET stock_lock irq_context: 0 sk_lock-AF_PHONET &c->lock irq_context: 0 sk_lock-AF_PHONET pcpu_alloc_mutex irq_context: 0 sk_lock-AF_PHONET pcpu_alloc_mutex pcpu_lock irq_context: 0 sk_lock-AF_PHONET &rq->__lock irq_context: 0 sk_lock-AF_PHONET &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PHONET pool_lock#2 irq_context: 0 sk_lock-AF_PHONET rcu_read_lock &pool->lock irq_context: 0 sk_lock-AF_PHONET rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_PHONET rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 sk_lock-AF_PHONET rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 sk_lock-AF_PHONET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_PHONET rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnn->pndevs.lock &c->lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock irq_context: 0 rtnl_mutex &pnn->pndevs.lock pool_lock#2 irq_context: 0 &ep->mtx kernfs_idr_lock irq_context: 0 gdp_mutex kernfs_idr_lock irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_state_lock pool_lock#2 irq_context: 0 &net->xfrm.xfrm_cfg_mutex &x->lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key rcu_read_lock &dentry->d_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock &dentry->d_lock/1 irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5/1 &sb->s_type->i_lock_key &dentry->d_lock/1 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&flowtable->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &lock->wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &p->pi_lock irq_context: 0 rtnl_mutex rcu_read_lock stock_lock irq_context: 0 rtnl_mutex rcu_read_lock pcpu_lock stock_lock irq_context: 0 &tfile->napi_mutex pcpu_lock irq_context: 0 &tfile->napi_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 rcu_read_lock _xmit_ETHER irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/2 rcu_read_lock &bridge_netdev_addr_lock_key/1 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 
(work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_node_0 irq_context: 0 misc_mtx fill_pool_map-wait-type-override &rq->__lock irq_context: 0 misc_mtx fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#9 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &meta->lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &base->lock irq_context: 0 &mm->mmap_lock fs_reclaim &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &journal->j_state_lock &journal->j_wait_transaction_locked irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock bit_wait_table + i irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#5 &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &n->list_lock &c->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &ul->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET &(&bp->lock)->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET &(&bp->lock)->lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET &dir->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET tk_core.seq.seqcount irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET &list->lock#5 
irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &tbl->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &n->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock &____s->seqcount#9 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &r->producer_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock &c->lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock pool_lock#2 irq_context: 0 nfnl_subsys_ulog &log->instances_lock &obj_hash[i].lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock &pcp->lock &zone->lock irq_context: 0 nfnl_subsys_ulog &log->instances_lock &____s->seqcount irq_context: 0 nfnl_subsys_ulog &log->instances_lock rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_ulog &log->instances_lock &dir->lock irq_context: 0 nfnl_subsys_ulog &inst->lock irq_context: 0 nfnl_subsys_ulog &rq->__lock irq_context: 0 nfnl_subsys_ulog &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock &inst->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &log->instances_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &ei->i_data_sem rcu_read_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &tn->lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex free_vmap_area_lock irq_context: 0 ppp_mutex vmap_area_lock irq_context: 0 ppp_mutex pcpu_alloc_mutex irq_context: 0 ppp_mutex pcpu_alloc_mutex pcpu_lock irq_context: 0 ppp_mutex pack_mutex irq_context: 0 ppp_mutex batched_entropy_u32.lock irq_context: 0 ppp_mutex text_mutex irq_context: 0 ppp_mutex text_mutex ptlock_ptr(page)#2 irq_context: 0 ppp_mutex &fp->aux->used_maps_mutex irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: softirq (&app->join_timer)#2 batched_entropy_u32.lock crngs.lock base_crng.lock irq_context: 0 bpf_dispatcher_xdp.mutex &rnp->exp_lock irq_context: 0 
bpf_dispatcher_xdp.mutex rcu_state.exp_mutex irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 rtnl_mutex &app->lock &c->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &c->lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &vlan_netdev_xmit_lock_key irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[2] irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 mem_id_lock irq_context: 0 mem_id_lock &rq->__lock irq_context: 0 mem_id_lock fs_reclaim irq_context: 0 mem_id_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 mem_id_lock pool_lock#2 irq_context: 0 mem_id_lock batched_entropy_u32.lock irq_context: 0 mem_id_lock &obj_hash[i].lock irq_context: 0 mem_id_lock mem_id_pool.xa_lock irq_context: 0 mem_id_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock &r->producer_lock#4 irq_context: 0 &r->consumer_lock#4 irq_context: 0 mem_id_lock &ht->lock irq_context: 0 mem_id_lock rcu_read_lock &ht->lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 mem_id_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 mem_id_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 mem_id_lock rcu_read_lock pool_lock#2 irq_context: 0 mem_id_lock rcu_read_lock rcu_node_0 irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 bpf_dispatcher_xdp.mutex rcu_state.exp_mutex.wait_lock irq_context: 0 bpf_dispatcher_xdp.mutex &p->pi_lock irq_context: 0 bpf_dispatcher_xdp.mutex &p->pi_lock &rq->__lock irq_context: 0 bpf_dispatcher_xdp.mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 rtnl_mutex &app->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 
rtnl_mutex &vlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &vlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 pool_lock#2 irq_context: 0 rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key/1 krc.lock irq_context: softirq rcu_callback mem_id_pool.xa_lock irq_context: 0 elock-AF_CAN irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex jump_label_mutex.wait_lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock tk_core.seq.seqcount irq_context: 0 cb_lock genl_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock rcu_read_lock &ws->lock &obj_hash[i].lock irq_context: 0 cb_lock rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 &ep->mtx batched_entropy_u8.lock irq_context: 0 &ep->mtx kfence_freelist_lock irq_context: 0 &ep->mtx &meta->lock irq_context: 0 &root->kernfs_rwsem &sem->wait_lock irq_context: 0 gdp_mutex kernfs_idr_lock &obj_hash[i].lock irq_context: 0 gdp_mutex kernfs_idr_lock pool_lock#2 irq_context: 0 &hdev->req_lock &obj_hash[i].lock pool_lock irq_context: 0 &hdev->req_lock &lock->wait_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rlock-AF_BLUETOOTH irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock rlock-AF_BLUETOOTH irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock &n->list_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_dev_list_lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock pool_lock#2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI hci_sk_list.lock rlock-AF_BLUETOOTH irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &meta->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 jbd2_handle &sbi->s_orphan_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 sk_lock-AF_INET 
rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock &____s->seqcount irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock rcu_read_lock pool_lock#2 irq_context: 0 sk_lock-AF_INET rcu_read_lock tcp_metrics_lock &obj_hash[i].lock irq_context: softirq &(&hinfo->gc_work)->timer irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &hinfo->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&hinfo->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&hinfo->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&hinfo->gc_work)->timer 
rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &rq->__lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 ppp_mutex init_mm.page_table_lock irq_context: 0 ppp_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rcu_read_lock &rq->__lock irq_context: 0 kn->active#67 fs_reclaim irq_context: 0 kn->active#67 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#67 stock_lock irq_context: 0 kn->active#67 &rq->__lock irq_context: 0 kn->active#67 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#67 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#67 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#67 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#67 &c->lock irq_context: 0 sk_lock-AF_UNIX &u->iolock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 pgd_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 stock_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_read_lock pool_lock#2 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 key irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 pcpu_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 percpu_counters_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 pcpu_lock stock_lock irq_context: 0 ppp_mutex batched_entropy_u8.lock irq_context: 0 ppp_mutex kfence_freelist_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock pgd_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock stock_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock key irq_context: 0 &xt[i].mutex &mm->mmap_lock pcpu_lock irq_context: 0 &xt[i].mutex &mm->mmap_lock percpu_counters_lock irq_context: 0 &xt[i].mutex 
&mm->mmap_lock pcpu_lock stock_lock irq_context: 0 &p->lock &of->mutex kn->active#5 pgd_lock irq_context: 0 &p->lock &of->mutex kn->active#5 stock_lock irq_context: 0 &p->lock &of->mutex kn->active#5 key irq_context: 0 &p->lock &of->mutex kn->active#5 pcpu_lock irq_context: 0 &p->lock &of->mutex kn->active#5 percpu_counters_lock irq_context: 0 &p->lock &of->mutex kn->active#5 pcpu_lock stock_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &c->lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)hci4#2 (work_completion)(&hdev->cmd_work) remove_cache_srcu &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock pcpu_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &obj_hash[i].lock pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq &(&net->ipv6.addr_chk_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &ndev->lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock rcu_read_lock k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) pool_lock#2 irq_context: 0 ppp_mutex batched_entropy_u32.lock crngs.lock irq_context: 0 &type->s_umount_key#48 irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock fs_reclaim irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock &____s->seqcount irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock stock_lock irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock &mm->page_table_lock irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock ptlock_ptr(page) irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock &anon_vma->rwsem irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock &anon_vma->rwsem &rq->__lock irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock &anon_vma->rwsem &mm->page_table_lock irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock ptlock_ptr(page)#2 lock#4 irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock &rq->__lock irq_context: 0 sk_lock-AF_AX25 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock stock_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex 
&wg->device_update_lock mmu_notifier_invalidate_range_start irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &sb->s_type->i_lock_key#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &dir->lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET k-slock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &____s->seqcount#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET batched_entropy_u32.lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock k-clock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET &table->hash[i].lock &table->hash2[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-slock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock cpu_hotplug_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock cpu_hotplug_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock cpu_hotplug_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-slock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock k-clock-AF_INET6 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-sk_lock-AF_INET6 &table->hash[i].lock &table->hash2[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &wg->socket_update_lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &table->hash[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &table->hash[i].lock &table->hash2[i].lock irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-clock-AF_INET irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &xa->xa_lock#8 irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock &fsnotify_mark_srcu irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock genl_mutex rtnl_mutex &wg->device_update_lock k-clock-AF_INET6 irq_context: 0 ppp_mutex rtnl_mutex rcu_node_0 irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock pool_lock#2 irq_context: 0 sk_lock-AF_CAN j1939_netdev_lock 
&net->can.rcvlists_lock irq_context: 0 sk_lock-AF_CAN &priv->lock irq_context: 0 sk_lock-AF_CAN &rq->__lock irq_context: 0 sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_CAN &priv->j1939_socks_lock irq_context: 0 sk_lock-AF_CAN &jsk->sk_session_queue_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->j1939_socks_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN &priv->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &net->can.rcvlists_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAN j1939_netdev_lock &priv->lock irq_context: 0 rcu_read_lock rcu_read_lock &____s->seqcount#5 irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim &rq->__lock irq_context: 0 rlock-AF_AX25 irq_context: 0 ppp_mutex rtnl_mutex &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex pgd_lock irq_context: 0 rcu_state.barrier_mutex stock_lock irq_context: 0 rcu_state.barrier_mutex rcu_read_lock pool_lock#2 irq_context: 0 rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 rcu_state.barrier_mutex key irq_context: 0 rcu_state.barrier_mutex pcpu_lock irq_context: 0 rcu_state.barrier_mutex percpu_counters_lock irq_context: 0 rcu_state.barrier_mutex pcpu_lock stock_lock irq_context: 0 rcu_state.barrier_mutex pool_lock#2 irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work &meta->lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work kfence_freelist_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &pcp->lock &zone->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &net->xfrm.xfrm_cfg_mutex &net->xfrm.xfrm_policy_lock &____s->seqcount#16 pool_lock#2 irq_context: 0 lock#3 pool_lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_owner.lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &nft_net->commit_mutex krc.lock irq_context: 0 ppp_mutex rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock rcu_read_lock &rq->__lock irq_context: 0 &hdev->lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &hdev->lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 ppp_mutex pcpu_alloc_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&policy->timer) irq_context: softirq (&policy->timer) &policy->lock irq_context: 0 kn->active#5 &kernfs_locks->open_file_mutex[count] rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &p->pi_lock irq_context: softirq (&n->timer) rcu_read_lock_bh rcu_read_lock &list->lock#5 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex stack_depot_init_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu rcu_read_lock 
&rq->__lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: softirq (&n->timer) k-slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &sb->s_type->i_mutex_key#10 &conn->chan_lock &chan->lock/1 rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET 
&c->lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq (&n->timer) k-slock-AF_INET rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 fs_reclaim &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tvlv.container_list_lock quarantine_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pgd_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock stock_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock key irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pcpu_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock percpu_counters_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 rcu_read_lock pcpu_lock stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 clock-AF_INET6 irq_context: 0 rtnl_mutex device_links_lock &rq->__lock irq_context: 0 rtnl_mutex device_links_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_nat_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock 
rcu_read_lock_bh rcu_read_lock &nf_conntrack_locks[i]/1 irq_context: softirq &peer->endpoint_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 sk_lock-AF_PPPOX rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &p->lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#68 fs_reclaim irq_context: 0 kn->active#68 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#68 stock_lock irq_context: 0 kn->active#68 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#68 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#68 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock irq_context: 0 sb_writers#8 kn->active#5 &kernfs_locks->open_file_mutex[count] fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.exp_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) rcu_node_0 irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock rcu_read_lock &pool->lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock 
rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex tracepoint_srcu_srcu_usage.lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss rcu_read_lock &p->pi_lock irq_context: 0 tomoyo_ss rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 tomoyo_ss rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#12 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#16 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 vlan_ioctl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock sb_writers#4 &____s->seqcount#2 irq_context: 0 &mm->mmap_lock sb_writers#4 &____s->seqcount irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &c->lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock irq_context: 0 (wq_completion)ext4-rsv-conversion (work_completion)(&ei->i_rsv_conversion_work) jbd2_handle &ei->i_data_sem &ei->i_es_lock &n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 tomoyo_ss mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock irq_context: 0 rtnl_mutex dev_hotplug_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 pernet_ops_rwsem uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock &ep->wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock free_vmap_area_lock kfence_freelist_lock irq_context: 0 (wq_completion)events drain_vmap_work vmap_purge_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&q->timer) rcu_read_lock pool_lock#2 irq_context: softirq (&q->timer) &zone->lock irq_context: softirq (&q->timer) &zone->lock &____s->seqcount irq_context: 0 tomoyo_ss console_owner_lock irq_context: 0 tomoyo_ss console_owner irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex stock_lock irq_context: 0 pernet_ops_rwsem devices_rwsem rdma_nets_rwsem &device->compat_devs_mutex pcpu_lock stock_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI (console_sem).lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI console_lock console_srcu console_owner_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI console_lock console_srcu console_owner irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI console_lock console_srcu console_owner &port_lock_key irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI console_lock console_srcu console_owner console_owner_lock irq_context: 0 pernet_ops_rwsem nf_ct_proto_mutex defrag6_mutex nf_hook_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount irq_context: softirq (&tw->tw_timer) per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex text_mutex pool_lock#2 irq_context: softirq (&tw->tw_timer) &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &f->f_pos_lock (console_sem).lock irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner_lock irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &f->f_pos_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock (console_sem).lock irq_context: 0 &mm->mmap_lock console_owner_lock irq_context: 0 &mm->mmap_lock console_owner irq_context: 0 &f->f_pos_lock console_owner_lock irq_context: 0 &mm->mmap_lock console_lock 
console_srcu console_owner_lock irq_context: 0 &mm->mmap_lock console_lock console_srcu console_owner irq_context: 0 &f->f_pos_lock console_owner irq_context: 0 &mm->mmap_lock console_lock console_srcu console_owner &port_lock_key irq_context: 0 &mm->mmap_lock console_lock console_srcu console_owner console_owner_lock irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 br_ioctl_mutex rtnl_mutex lock kernfs_idr_lock &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &sem->wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &sem->wait_lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 br_ioctl_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rlock-AF_NETLINK irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock &ei->socket.wq.wait irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex &br->hash_lock rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex nf_hook_mutex irq_context: 0 br_ioctl_mutex rtnl_mutex nf_hook_mutex fs_reclaim irq_context: 0 br_ioctl_mutex rtnl_mutex nf_hook_mutex fs_reclaim &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex nf_hook_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 br_ioctl_mutex rtnl_mutex nf_hook_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 br_ioctl_mutex rtnl_mutex nf_hook_mutex stock_lock irq_context: 0 br_ioctl_mutex rtnl_mutex nf_hook_mutex pool_lock#2 irq_context: 0 br_ioctl_mutex rtnl_mutex cpu_hotplug_lock irq_context: 0 br_ioctl_mutex rtnl_mutex rlock-AF_NETLINK irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock irq_context: 0 br_ioctl_mutex rtnl_mutex 
rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock irq_context: 0 br_ioctl_mutex rtnl_mutex rcu_read_lock &ei->socket.wq.wait &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#69 fs_reclaim irq_context: 0 kn->active#69 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#69 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#69 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#69 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET &____s->seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock pool_lock#2 irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock tk_core.seq.seqcount irq_context: softirq (&icsk->icsk_retransmit_timer) slock-AF_INET rcu_read_lock rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &____s->seqcount irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &zone->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &zone->lock &____s->seqcount irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 slock-AF_BLUETOOTH-BTPROTO_L2CAP irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP/2 &ei->socket.wq.wait irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&pool->idle_cull_work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 wq_pool_attach_mutex &cfs_rq->removed.lock irq_context: 0 wq_pool_attach_mutex &obj_hash[i].lock irq_context: 0 wq_pool_attach_mutex pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 pcpu_lock stock_lock irq_context: 0 br_ioctl_mutex rtnl_mutex &mm->mmap_lock &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#7 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#7 fs_reclaim irq_context: 0 rtnl_mutex &tn->idrinfo->lock#7 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 rtnl_mutex &tn->idrinfo->lock#7 &c->lock irq_context: 0 rtnl_mutex &tn->idrinfo->lock#7 pool_lock#2 irq_context: 0 rtnl_mutex &tn->idrinfo->lock#7 &obj_hash[i].lock irq_context: 0 sk_lock-AF_PHONET &f->f_lock irq_context: 0 sk_lock-AF_PHONET &f->f_lock 
fasync_lock irq_context: 0 sk_lock-AF_PHONET &f->f_lock fasync_lock &new->fa_lock irq_context: 0 sk_lock-AF_PHONET &f->f_lock fasync_lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &____s->seqcount irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock stock_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &mm->page_table_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock ptlock_ptr(page) irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock ptlock_ptr(page)#2 irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 rcu_read_lock rcu_read_lock &pn->hash_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 hci_sk_list.lock irq_context: softirq (&n->timer) &obj_hash[i].lock pool_lock irq_context: 0 sk_lock-AF_INET6 &sighand->siglock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_NETLINK rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u8.lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) genl_mutex irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) genl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) genl_mutex pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&nlk->work) &obj_hash[i].lock pool_lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock 
irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 kn->active#5 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle &sbi->s_orphan_lock &lock->wait_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock batched_entropy_u8.lock crngs.lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &idev->mc_lock &vlan_netdev_addr_lock_key &n->list_lock &c->lock irq_context: softirq (&icsk->icsk_delack_timer) slock-AF_INET6 &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: softirq rcu_read_lock rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu quarantine_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 ppp_mutex text_mutex text_mutex.wait_lock irq_context: 0 ppp_mutex text_mutex &rq->__lock irq_context: 0 ppp_mutex text_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&(&kfence_timer)->work) cpu_hotplug_lock jump_label_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex remove_cache_srcu irq_context: 0 ppp_mutex remove_cache_srcu quarantine_lock irq_context: 0 ppp_mutex remove_cache_srcu &c->lock irq_context: 0 ppp_mutex remove_cache_srcu &n->list_lock irq_context: 0 ppp_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 ppp_mutex remove_cache_srcu &rq->__lock irq_context: 0 ppp_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock 
&n->list_lock irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &root->kernfs_iattr_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rcu_state.gp_wq irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_read_lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 &base->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rhashtable_bucket irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &q->lock#2 pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + 
(((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg2#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bond->mii_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: softirq (&journal->j_commit_timer) &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 bt_proto_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 ppp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex &ppp->rlock irq_context: 0 ppp_mutex (console_sem).lock irq_context: 0 ppp_mutex console_lock console_srcu console_owner_lock irq_context: 0 ppp_mutex console_lock console_srcu console_owner irq_context: 0 ppp_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 ppp_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 &ep->mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock_bh kfence_freelist_lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sighand->siglock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 ppp_mutex rtnl_mutex batched_entropy_u8.lock irq_context: 0 ppp_mutex rtnl_mutex kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI &list->lock#7 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &pool->lock/1 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) hci_sk_list.lock irq_context: 0 ebt_mutex rcu_read_lock &rq->__lock irq_context: 0 ebt_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex uevent_sock_mutex.wait_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem batched_entropy_u8.lock crngs.lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) &rq->__lock irq_context: softirq &(&dm_bufio_cleanup_old_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&(&hdev->cmd_timer)->work) rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) hci_dev_list_lock irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) rlock-AF_BLUETOOTH irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) wlock-AF_BLUETOOTH irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) pool_lock#2 irq_context: 0 (wq_completion)hci3#2 (work_completion)(&hdev->cmd_work) &dir->lock irq_context: 0 (wq_completion)events_unbound &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock rcu_node_0 irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &rq->__lock irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 ppp_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 ppp_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock 
rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) kfence_freelist_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &pcp->lock &zone->lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &local->key_mtx rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 &fsnotify_mark_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3 remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq irq_context: 0 ppp_mutex &zone->lock irq_context: 0 ppp_mutex &zone->lock &____s->seqcount irq_context: 0 sb_writers#3 &mm->mmap_lock &rq->__lock irq_context: 0 sb_writers#3 &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 
ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: softirq rcu_callback pcpu_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 ppp_mutex &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock crngs.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] &lock->wait_lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work rcu_node_0 irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &disk->open_mutex rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&peer->transmit_packet_work) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; 
__asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->managed_work)->work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &base->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void 
*)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 ppp_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &vma->vm_lock->lock pool_lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &n->list_lock 
irq_context: 0 rtnl_mutex team->team_lock_key#6 &n->list_lock &c->lock irq_context: 0 rtnl_mutex team->team_lock_key#6 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&sdp->work) &x->wait#3 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 ppp_mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 ppp_mutex free_vmap_area_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq irq_context: 0 ppp_mutex &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12/4 &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim rcu_node_0 irq_context: 0 &mm->mmap_lock fs_reclaim &rcu_state.expedited_wq irq_context: 0 &mm->mmap_lock fs_reclaim &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &mm->mmap_lock fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock fs_reclaim &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex.wait_lock irq_context: 0 &mm->mmap_lock rcu_read_lock stock_lock irq_context: 0 &mm->mmap_lock rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &ul->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#8 &of->mutex 
kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 rcu_read_lock &ei->socket.wq.wait &p->pi_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock fastopen_seqlock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 rcu_read_lock fastopen_seqlock fastopen_seqlock.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock tk_core.seq.seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &c->lock irq_context: softirq rcu_read_lock &dir->lock irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 rcu_read_lock pool_lock#2 irq_context: 0 &mm->mmap_lock ptlock_ptr(page)#2 &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 tracepoints_mutex cpu_hotplug_lock jump_label_mutex.wait_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &p->pi_lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock &p->pi_lock &rq->__lock irq_context: 0 tracepoints_mutex cpu_hotplug_lock 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem kernfs_rename_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &root->kernfs_rwsem &obj_hash[i].lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx kernfs_rename_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rlock-AF_NETLINK irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &ei->socket.wq.wait &ep->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock rcu_node_0 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx uevent_sock_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 fs_reclaim irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 stock_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 &c->lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 pool_lock#2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rcu_read_lock rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 &dentry->d_lock &wq irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 tk_core.seq.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock rename_lock.seqcount irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 rename_lock 
rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 &rq->__lock irq_context: 0 cb_lock rtnl_mutex &rdev->wiphy.mtx &sb->s_type->i_mutex_key#3/1 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &type->i_mutex_dir_key#4 rename_lock irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &____s->seqcount#4 &____s->seqcount#4/1 irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock &dentry->d_lock/2 &dentry->d_lock/3 &wq irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock/2 irq_context: 0 &type->i_mutex_dir_key#4 rename_lock rename_lock.seqcount &dentry->d_lock/2 &dentry->d_lock/3 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex ovs_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &xa->xa_lock#3 &c->lock irq_context: 0 ppp_mutex rtnl_mutex &xa->xa_lock#3 pool_lock#2 irq_context: 0 sb_writers#8 remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 rcu_read_lock rcu_read_lock &n->lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem pgd_lock irq_context: 0 
&sb->s_type->i_mutex_key#10 (netlink_chain).rwsem stock_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem key irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem pcpu_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem percpu_counters_lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem pcpu_lock stock_lock irq_context: 0 ppp_mutex rtnl_mutex &xa->xa_lock#3 &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &xa->xa_lock#3 &n->list_lock &c->lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &sem->wait_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock quarantine_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu 
&c->lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) 
(__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu rcu_read_lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#19 &rq->__lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 batched_entropy_u8.lock crngs.lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 remove_cache_srcu pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#7 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock &nf_conntrack_locks[i] &nf_conntrack_locks[i]/1 batched_entropy_u8.lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 &tfile->napi_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_BLUETOOTH-BTPROTO_HCI rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &nft_net->commit_mutex &rq->__lock irq_context: 0 rtnl_mutex 
&nft_net->commit_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &rq->__lock irq_context: 0 (wq_completion)events_unbound (stats_flush_dwork).work &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 tomoyo_ss mount_lock rcu_read_lock rename_lock.seqcount irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx rcu_node_0 irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock irq_context: 0 rtnl_mutex &rdev->wiphy.mtx &wdev->mtx &local->mtx &local->chanctx_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg1#2 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &c->lock irq_context: 0 fill_pool_map-wait-type-override &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &sem->wait_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock irq_context: 0 
rtnl_mutex cpu_hotplug_lock xps_map_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle pgd_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle stock_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle key irq_context: 0 sb_writers#4 sb_internal jbd2_handle pcpu_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle percpu_counters_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle pcpu_lock stock_lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (work_completion)(&w->work)#2 nf_conntrack_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) rcu_node_0 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#4 irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &____s->seqcount irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock rcu_read_lock pool_lock#2 irq_context: softirq (&tsc_sync_check_timer) irq_context: softirq (&tsc_sync_check_timer) &obj_hash[i].lock irq_context: softirq (&tsc_sync_check_timer) &base->lock irq_context: softirq (&tsc_sync_check_timer) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock &meta->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&gc_work->dwork)->work) rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rcu_node_0 irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex fs_reclaim &rq->__lock irq_context: 0 ppp_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock irq_context: 0 &fsnotify_mark_srcu fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &hdev->req_lock &n->list_lock irq_context: 0 &hdev->req_lock &n->list_lock &c->lock irq_context: 0 &hdev->req_lock rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) 
&peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 rcu_read_lock &dentry->d_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 jbd2_handle &ei->i_data_sem &ei->i_es_lock key#8 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex quarantine_lock irq_context: 0 sb_writers#4 tomoyo_ss batched_entropy_u8.lock irq_context: 0 sb_writers#4 tomoyo_ss kfence_freelist_lock irq_context: 0 sb_writers#4 tomoyo_ss &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex pack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&aux->work) pack_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock remove_cache_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 &chan->lock/1 quarantine_lock irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &sb->s_type->i_lock_key#23 &dentry->d_lock &wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock 
&n->list_lock &c->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq net/ipv4/devinet.c:474 irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: softirq net/ipv4/devinet.c:474 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET/1 key#26 irq_context: 0 &kernfs_locks->open_file_mutex[count] &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock mmu_notifier_invalidate_range_start &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &base->lock irq_context: 0 (wq_completion)events_unbound (reaper_work).work &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex remove_cache_srcu irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex remove_cache_srcu quarantine_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex remove_cache_srcu &c->lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex remove_cache_srcu &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex uevent_sock_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pn->all_ppp_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &batadv_netdev_addr_lock_key/1 krc.lock 
irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &batadv_netdev_addr_lock_key/1 krc.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 &bat_priv->softif_vlan_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock key#16 krc.lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &bat_priv->tt.commit_lock &bat_priv->tt.last_changeset_lock &obj_hash[i].lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock &obj_hash[i].lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock pool_lock#2 irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &xt[i].mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock &p->alloc_lock irq_context: 0 &sb->s_type->i_mutex_key#10 &n->list_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&app->join_timer) &app->lock batched_entropy_u8.lock irq_context: softirq (&app->join_timer) &app->lock kfence_freelist_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: 0 nfnl_subsys_ctnetlink rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pgd_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex stock_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex key irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pcpu_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex percpu_counters_lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex pcpu_lock stock_lock irq_context: 0 ppp_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock 
rcu_read_lock_bh &meta->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh kfence_freelist_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock pcpu_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->bla.work)->work) rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev->mutex &rq->__lock irq_context: 0 rtnl_mutex &dev->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &vma->vm_lock->lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &____s->seqcount#2 irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock irq_context: 0 rtnl_mutex (work_completion)(&(&idev->mc_report_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sighand->siglock batched_entropy_u8.lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sighand->siglock &n->list_lock irq_context: 0 &sighand->siglock &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 &net->sctp.addr_wq_lock slock-AF_INET6/1 key#26 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->nc.work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#8 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &meta->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock kfence_freelist_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &mm->mmap_lock &rq->__lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &rq->__lock &base->lock irq_context: 0 &mm->mmap_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 &p->lock rcu_read_lock &rcu_state.expedited_wq irq_context: 0 &p->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &p->lock rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 &tsk->futex_exit_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rq->__lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#19 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &base->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock krc.lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &____s->seqcount irq_context: 0 &ep->mtx gdp_mutex &n->list_lock irq_context: 0 &ep->mtx gdp_mutex &n->list_lock &c->lock irq_context: 0 rtnl_mutex gdp_mutex gdp_mutex.wait_lock irq_context: 0 &ep->mtx gdp_mutex.wait_lock irq_context: 0 &ep->mtx lock kernfs_idr_lock &c->lock irq_context: 0 &ep->mtx lock kernfs_idr_lock &n->list_lock irq_context: 0 &ep->mtx lock kernfs_idr_lock &n->list_lock &c->lock irq_context: 0 &ep->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &ep->mtx &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sig->cred_guard_mutex tomoyo_ss &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex &mm->mmap_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock irq_context: 0 rtnl_mutex &hwstats->hwsdev_list_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &u->iolock pcpu_lock stock_lock irq_context: 0 &mm->mmap_lock remove_cache_srcu rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 rtnl_mutex team->team_lock_key#2 &n->list_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#9 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock irq_context: 0 
sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 &sb->s_type->i_mutex_key#9 &cfs_rq->removed.lock irq_context: 0 &sighand->siglock rcu_read_lock pool_lock#2 irq_context: softirq (&peer->timer_zero_key_material) irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &obj_hash[i].lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 pool_lock#2 irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ 
unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 bpf_module_mutex irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u32.lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock pool_lock#2 irq_context: 0 &sb->s_type->i_mutex_key#9 stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &cfs_rq->removed.lock irq_context: 0 dup_mmap_sem &mm->mmap_lock &mm->mmap_lock/1 &anon_vma->rwsem rcu_read_lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &cfs_rq->removed.lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: softirq rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &c->lock irq_context: 0 (wq_completion)bond1#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key/1 &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 
0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &fsnotify_mark_srcu &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex smc_ib_devices.mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex smc_ib_devices.mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 bt_proto_lock batched_entropy_u8.lock irq_context: 0 bt_proto_lock kfence_freelist_lock irq_context: softirq (&ifibss->timer) rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)mld 
(work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &c->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 ppp_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 tomoyo_ss remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start 
&rq->__lock irq_context: 0 ppp_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock &qdisc_xmit_lock_key#3 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &bond->stats_lock irq_context: 0 (wq_completion)bond4#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_lock stock_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 ppp_mutex rtnl_mutex &rcu_state.expedited_wq irq_context: 0 ppp_mutex rtnl_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sk_lock-AF_CAIF &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override pool_lock irq_context: 
0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 &tcp_hashinfo.bhash[i].lock quarantine_lock irq_context: 0 (wq_completion)bond5#3 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock 
irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond5#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#3 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &c->lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &local->sta_mtx remove_cache_srcu &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock irq_context: 0 rcu_read_lock_bh dev->qdisc_tx_busylock ?: &qdisc_tx_busylock _xmit_ETHER#2 rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond7#3 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond7#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 
0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond7#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ei->i_prealloc_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex mmu_notifier_invalidate_range_start irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &c->lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &mapping->private_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock irq_context: 0 
&sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &ret->b_state_lock &journal->j_list_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &pa->pa_lock irq_context: 0 &sb->s_type->i_mutex_key#8 &sbi->s_writepages_rwsem jbd2_handle &ei->i_data_sem &lg->lg_mutex &lg->lg_prealloc_lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx &rq->__lock irq_context: 0 rtnl_mutex dev_pm_qos_sysfs_mtx dev_pm_qos_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_BLUETOOTH-BTPROTO_HCI slock-AF_BLUETOOTH-BTPROTO_HCI &sk->sk_lock.wq#2 irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI &sk->sk_lock.wq#2 irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI &sk->sk_lock.wq#2 &p->pi_lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI &sk->sk_lock.wq#2 &p->pi_lock &rq->__lock irq_context: 0 slock-AF_BLUETOOTH-BTPROTO_HCI &sk->sk_lock.wq#2 &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock irq_context: 0 rtnl_mutex wq_pool_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &mm->mmap_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 
jbd2_handle &ei->i_data_sem &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 rcu_node_0 irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex batched_entropy_u8.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 uevent_sock_mutex kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond9 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock &obj_hash[i].lock pool_lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock 
&pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 ppp_mutex &ppp->wlock &ppp->rlock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 ppp_mutex &pcp->lock &zone->lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#3 oom_adj_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex rtnl_mutex.wait_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond10 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &br->hash_lock &obj_hash[i].lock pool_lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&pwq->unbound_release_work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pool->lock/1 rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex gdp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_node_0 irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex 
&obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &obj_hash[i].lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rnp->exp_wq[3] irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rnp->exp_wq[0] irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex pool_lock#2 irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 cb_lock genl_mutex rcu_state.exp_mutex &rnp->exp_wq[1] irq_context: softirq (&n->timer) rcu_read_lock rcu_read_lock &nf_nat_locks[i] irq_context: softirq (&n->timer) rcu_read_lock &obj_hash[i].lock irq_context: softirq (&n->timer) rcu_read_lock pool_lock#2 irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 &nft_net->commit_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER fs_reclaim irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER pool_lock#2 irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER rcu_read_lock pool_lock#2 irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER rcu_read_lock rcu_read_lock_bh pool_lock#2 irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER rlock-AF_NETLINK irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER &obj_hash[i].lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &n->list_lock &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 (netlink_chain).rwsem &nft_net->commit_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &kernfs_locks->open_file_mutex[count] rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 &xt[i].mutex &mm->mmap_lock &obj_hash[i].lock pool_lock irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock genl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem 
&anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock &mapping->i_mmap_rwsem &anon_vma->rwsem fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 nfnl_subsys_acct nlk_cb_mutex-NETFILTER &c->lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg2#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex &pcp->lock &zone->lock irq_context: 0 ppp_mutex rtnl_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &dentry->d_lock &lru->node[i].lock rcu_read_lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &ht->mutex &rq->__lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &n->list_lock &c->lock irq_context: 0 &ei->i_data_sem rcu_node_0 irq_context: 0 vlan_ioctl_mutex &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond17 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket 
irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond18 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock irq_context: 0 rtnl_mutex bus_type_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#3 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &n->list_lock &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond8#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &sem->wait_lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex &mm->mmap_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_mutex_key#8 mapping.invalidate_lock &mapping->i_mmap_rwsem lock#4 &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#8 &of->mutex kn->active#5 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond9#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond9#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex batched_entropy_u8.lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex kfence_freelist_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key/1 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &n->list_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex &dev_addr_list_lock_key/1 rcu_read_lock _xmit_ETHER &n->list_lock &c->lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 vlan_ioctl_mutex rtnl_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 vlan_ioctl_mutex rtnl_mutex 
rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 ppp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 ppp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
kn->active#5 remove_cache_srcu rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&n->timer) rcu_read_lock &c->lock irq_context: softirq (&n->timer) rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rq->__lock irq_context: 0 rtnl_mutex rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond10#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &cookie->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 &____s->seqcount#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock batched_entropy_u8.lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &meta->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond23 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 ppp_mutex quarantine_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond24 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&____s->seqcount#9 irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 cb_lock nlk_cb_mutex-GENERIC genl_mutex rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex remove_cache_srcu &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &rcu_state.expedited_wq &p->pi_lock irq_context: 0 key#27 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET key#27 irq_context: 0 (wq_completion)bond26 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock 
irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &fsnotify_mark_srcu batched_entropy_u8.lock irq_context: 0 &fsnotify_mark_srcu kfence_freelist_lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &pcp->lock &zone->lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex &____s->seqcount irq_context: 0 (wq_completion)bond29 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond29 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#8 fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) tasklist_lock &n->list_lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq 
&(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &obj_hash[i].lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock irq_context: softirq &(&flowtable->gc_work)->timer rcu_read_lock &pool->lock &p->pi_lock &rq->__lock &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &obj_hash[i].lock pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 &rcu_state.expedited_wq &p->pi_lock irq_context: 0 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex remove_cache_srcu &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &rnp->exp_wq[3] irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 ppp_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex &meta->lock irq_context: 0 ppp_mutex &fp->aux->used_maps_mutex &rq->__lock irq_context: 0 ppp_mutex &fp->aux->used_maps_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &dev_addr_list_lock_key &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex &pn->all_ppp_mutex fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &pn->all_ppp_mutex fill_pool_map-wait-type-override &c->lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rtnl_mutex &pn->all_ppp_mutex 
fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &pn->all_ppp_mutex fill_pool_map-wait-type-override &n->list_lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 &sb->s_type->i_mutex_key#12 &sb->s_type->i_mutex_key#12/4 rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &pnn->pndevs.lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) batched_entropy_u8.lock crngs.lock irq_context: 0 rtnl_mutex (switchdev_blocking_notif_chain).rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 &ei->xattr_sem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &cfs_rq->removed.lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex pgd_lock irq_context: 0 ppp_mutex rtnl_mutex key irq_context: 0 ppp_mutex rtnl_mutex percpu_counters_lock irq_context: 0 ppp_mutex rtnl_mutex pcpu_lock stock_lock irq_context: 0 ppp_mutex rtnl_mutex pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#3 
(work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 ppp_mutex rtnl_mutex dpm_list_mtx &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex dpm_list_mtx &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sk_lock-AF_INET6 &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 (wq_completion)bond17#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 &sig->cred_guard_mutex rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond39 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond2#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond16 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 tracepoints_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond3#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond3#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock &meta->lock irq_context: 0 (wq_completion)events (work_completion)(&aux->work) purge_vmap_area_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_node_0 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond45 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4#3 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events 
(work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond17#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond47 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key batched_entropy_u8.lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key kfence_freelist_lock irq_context: 0 (wq_completion)bond48 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond4#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond4#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 kn->active#70 fs_reclaim irq_context: 0 kn->active#70 fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 kn->active#70 stock_lock irq_context: 0 kn->active#70 &kernfs_locks->open_file_mutex[count] irq_context: 0 kn->active#70 &kernfs_locks->open_file_mutex[count] &rq->__lock irq_context: 0 kn->active#70 &kernfs_locks->open_file_mutex[count] &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 kn->active#70 &kernfs_locks->open_file_mutex[count] fs_reclaim irq_context: 0 kn->active#70 &kernfs_locks->open_file_mutex[count] fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock pool_lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock kfence_freelist_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &meta->lock irq_context: 0 kn->active#70 &kernfs_locks->open_file_mutex[count] &c->lock irq_context: 0 (wq_completion)bond50 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 kn->active#70 &c->lock irq_context: 0 rtnl_mutex uevent_sock_mutex batched_entropy_u8.lock crngs.lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &obj_hash[i].lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &base->lock irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &____s->seqcount#2 irq_context: 0 rtnl_mutex &bridge_netdev_addr_lock_key &____s->seqcount irq_context: 0 (wq_completion)bond22#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock irq_context: 0 rtnl_mutex &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond19#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 tracepoints_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond23#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 ppp_mutex &mm->mmap_lock &sem->wait_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) rcu_node_0 irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->list_lock &c->lock irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bond24#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &child->perf_event_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 rcu_read_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex remove_cache_srcu irq_context: 0 ppp_mutex rtnl_mutex &pn->all_ppp_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond25#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem rcu_node_0 irq_context: 0 rtnl_mutex &rnp->exp_wq[1] irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &bridge_netdev_addr_lock_key &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond26#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 
&type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond27#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond29#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) &pcp->lock &zone->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 fill_pool_map-wait-type-override &c->lock irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &sb->s_type->i_lock_key#22 &xa->xa_lock#8 fill_pool_map-wait-type-override pool_lock irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &mm->mmap_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &br->hash_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &rq->__lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock batched_entropy_u8.lock irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock rcu_read_lock kfence_freelist_lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 syslog_lock &rq->__lock irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex bpf_devs_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock 
irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) quarantine_lock irq_context: 0 lock prog_idr_lock &____s->seqcount#2 irq_context: 0 lock prog_idr_lock &____s->seqcount irq_context: softirq (&peer->timer_retransmit_handshake) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &n->lock irq_context: 0 (wq_completion)wg-crypt-wg1#2 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock irq_context: 0 fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 rtnl_mutex &ht->mutex rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &ht->mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond75 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&app->periodic_timer) &app->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_callback fill_pool_map-wait-type-override &c->lock irq_context: softirq (&app->periodic_timer) &app->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq rcu_callback fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq rcu_callback fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76 
irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-crypt-wg0#2 (work_completion)(&peer->transmit_packet_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 batched_entropy_u8.lock irq_context: 0 rtnl_mutex &xa->xa_lock#3 kfence_freelist_lock irq_context: 0 (wq_completion)bond78 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 rtnl_mutex &root->kernfs_rwsem kernfs_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond79 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond78 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond81 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock 
irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond81 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->dat.work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &cfs_rq->removed.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex batched_entropy_u8.lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex kfence_freelist_lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &meta->lock irq_context: 0 (wq_completion)bond82 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond11 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock 
irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond82 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 sb_internal jbd2_handle &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12 irq_context: 0 (wq_completion)bond12 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond13 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond85 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 ppp_mutex rtnl_mutex &pnettable->lock &rq->__lock irq_context: 0 ppp_mutex rtnl_mutex &pnettable->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#3 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&rew->rew_work) rcu_state.exp_wake_mutex &rnp->exp_wq[2] &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond27#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond41#2 irq_context: 0 
(wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 tomoyo_ss rcu_read_lock pgd_lock irq_context: 0 tomoyo_ss rcu_read_lock stock_lock irq_context: 0 tomoyo_ss rcu_read_lock key irq_context: 0 tomoyo_ss rcu_read_lock pcpu_lock irq_context: 0 tomoyo_ss rcu_read_lock percpu_counters_lock irq_context: 0 tomoyo_ss rcu_read_lock pcpu_lock stock_lock irq_context: 0 (wq_completion)bond14 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &pool->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_power_efficient (work_completion)(&(&tbl->gc_work)->work) &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 link_idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 link_idr_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 link_idr_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond43#2 irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &xa->xa_lock#8 &n->list_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#2 irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond45#2 irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &sb->s_type->i_mutex_key#9 remove_cache_srcu rcu_node_0 irq_context: 0 (wq_completion)bond46#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 
irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq &(&nsim_dev->trap_data->trap_report_dw)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond29#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond48#2 irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 
(wq_completion)bond50#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#4 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock &dev_addr_list_lock_key/1 rcu_read_lock &bridge_netdev_addr_lock_key &n->list_lock &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) rcu_node_0 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 
(wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &pn->all_ppp_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 sk_lock-AF_X25 irq_context: 0 sk_lock-AF_X25 slock-AF_X25 irq_context: 0 sk_lock-AF_X25 &mm->mmap_lock irq_context: 0 slock-AF_X25 irq_context: 0 (wq_completion)bond18#4 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &tb->tb6_lock quarantine_lock irq_context: 0 (wq_completion)bond19#4 irq_context: 0 (wq_completion)bond19#4 &rq->__lock irq_context: 0 (wq_completion)bond19#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 irq_context: 0 (wq_completion)bond91 &rq->__lock irq_context: 0 (wq_completion)bond91 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock &anon_vma->rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond92 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bat_events &cfs_rq->removed.lock irq_context: 0 (wq_completion)bat_events &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)bond34 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond19#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pgd_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex stock_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex rcu_read_lock pool_lock#2 irq_context: 0 rtnl_mutex rcu_state.barrier_mutex key irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pcpu_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex percpu_counters_lock irq_context: 0 rtnl_mutex rcu_state.barrier_mutex pcpu_lock stock_lock irq_context: 0 (wq_completion)bond36 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ht->mutex quarantine_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)events_long 
(work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock pool_lock#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock nl_table_lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock nl_table_wait.lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &cookie->lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond36 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond37 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond37 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond20#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &f->f_pos_lock sb_writers#4 &sb->s_type->i_mutex_key#8 remove_cache_srcu 
&rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_node_0 irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &journal->j_state_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#2 irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond93 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: softirq (&n->timer) stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override 
&____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)events (work_completion)(&ht->run_work) &ht->mutex &ht->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond41#3 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#3 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 irq_context: 0 
(wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex netpoll_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#3 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)wg-kex-wg0#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#3 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 irq_context: 0 rtnl_mutex &wq->mutex &rq->__lock irq_context: 0 rtnl_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#3 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_node_0 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond95 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 
0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#3 irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#3 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &c->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)events_freezable (work_completion)(&vb->update_balloon_stats_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond46#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond43#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond98 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond6#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond45#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond45#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond47#3 irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#3 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#3 irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond48#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond49#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond100 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 cgroup_threadgroup_rwsem rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond11#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &n->list_lock &c->lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 fill_pool_map-wait-type-override &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 &p->lock &of->mutex kn->active#5 &rcu_state.expedited_wq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50#3 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) rcu_node_0 irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rcu_state.expedited_wq 
&p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg0#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &____s->seqcount irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond51#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) 
irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &pcp->lock &zone->lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond51#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond52 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond14#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 slock-AF_INET6 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &root->kernfs_iattr_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond54 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock 
irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex bpf_devs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond55 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &cfs_rq->removed.lock irq_context: 0 rtnl_mutex remove_cache_srcu rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex net_rwsem &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond76#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)wg-kex-wg2#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock &sem->wait_lock irq_context: 0 (wq_completion)bond77#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &ndev->lock &ifa->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond78#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock crngs.lock base_crng.lock irq_context: 0 (wq_completion)bond16#3 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex pcpu_alloc_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#5 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond106 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &obj_hash[i].lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &base->lock irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 &wb->work_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond33#2 irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#2 irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#5 irq_context: 0 (wq_completion)bond18#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#5 &sb->s_type->i_mutex_key#12 &xa->xa_lock#8 &c->lock irq_context: 0 (wq_completion)bond82#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#2 irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#2 irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock init_task.mems_allowed_seq.seqcount irq_context: 0 (wq_completion)bond83 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond53 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&bond->stats_lock/1 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: softirq rcu_callback stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#2 
irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond109 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#2 irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &n->list_lock irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock &tcp_hashinfo.bhash2[i].lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond39#3 irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond77#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond40#4 irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock 
irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 
irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond54 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#4 irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 
irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond15#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#4 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle pgd_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle key irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle pcpu_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle percpu_counters_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle pcpu_lock stock_lock irq_context: 0 (wq_completion)bond112 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#4 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond83 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond83 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 kn->active#5 &pcp->lock &zone->lock &____s->seqcount irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq rcu_read_lock rcu_read_lock slock-AF_INET6/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 tracepoints_mutex rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)wg-crypt-wg0#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock pool_lock#2 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 
&type->i_mutex_dir_key#5 tomoyo_ss rcu_read_lock &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work quarantine_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 
(wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &ht->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 &fsnotify_mark_srcu rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&peer->transmit_packet_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond117 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#2 &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->mii_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) &ndev->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_node_0 irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)wg-crypt-wg1#5 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)events (work_completion)(&(&ovs_net->masks_rebalance)->work) ovs_mutex &____s->seqcount irq_context: 0 (wq_completion)bond25#3 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#4 irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex fill_pool_map-wait-type-override &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 
0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#4 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond28#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond28#4 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tomoyo_ss fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 tomoyo_ss fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond29#4 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond30#3 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 
irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond67 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond120 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond30#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond68 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex lock#7 &rq->__lock irq_context: 0 rtnl_mutex lock#7 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond69 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond70 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_node_0 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
fs_reclaim irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 prog_idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 prog_idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond73 irq_context: 0 (wq_completion)bond73 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex wq_pool_mutex &wq->mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&br->gc_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_INET6 lock sctp_assocs_id_lock &____s->seqcount irq_context: 0 &mm->mmap_lock sb_writers#4 rcu_read_lock rcu_node_0 irq_context: 0 &mm->mmap_lock sb_writers#4 rcu_read_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_writers#4 rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#5 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override 
pool_lock irq_context: 0 sb_writers#5 &type->i_mutex_dir_key#5/1 tomoyo_ss rcu_read_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg1#9 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh &sch->q.lock hrtimer_bases.lock tk_core.seq.seqcount irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &c->lock irq_context: softirq (&peer->timer_retransmit_handshake) &list->lock#14 quarantine_lock irq_context: 0 &pipe->mutex/1 &mm->mmap_lock rcu_node_0 irq_context: 0 tracepoints_mutex &rnp->exp_lock irq_context: 0 tracepoints_mutex rcu_state.exp_mutex irq_context: 0 tracepoints_mutex rcu_state.exp_mutex rcu_state.exp_mutex.wait_lock irq_context: 0 ppp_mutex &mm->mmap_lock rcu_read_lock rcu_node_0 irq_context: 0 ppp_mutex &mm->mmap_lock rcu_read_lock &rq->__lock irq_context: 0 ppp_mutex &mm->mmap_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rcu_read_lock rcu_read_lock rcu_read_lock rcu_read_lock &(&bp->lock)->lock &____s->seqcount#13 &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)events (work_completion)(&(&krcp->krw_arr[i].rcu_work)->work) rcu_callback rcu_node_0 irq_context: 0 &f->f_pos_lock sb_writers#3 &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)events_unbound (work_completion)(&sub_info->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 tasklist_lock &base->lock irq_context: 0 tasklist_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#9 (work_completion)(&peer->transmit_handshake_work) &n->list_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg0#10 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &wg->static_identity.lock &handshake->lock rcu_read_lock_bh batched_entropy_u32.lock crngs.lock irq_context: 0 rcu_read_lock console_owner_lock irq_context: 0 rcu_read_lock console_owner irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond36#3 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#3 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 rtnl_mutex free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)bond80#3 irq_context: 0 (wq_completion)bond80#3 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 sb_internal &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#4 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 
0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond126 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rcu_read_lock_bh rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond40#5 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fs_reclaim &cfs_rq->removed.lock irq_context: 0 rtnl_mutex fs_reclaim &obj_hash[i].lock irq_context: 0 &tsk->futex_exit_mutex rcu_node_0 irq_context: 0 &tsk->futex_exit_mutex &rcu_state.expedited_wq irq_context: 0 &tsk->futex_exit_mutex &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &tsk->futex_exit_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &tsk->futex_exit_mutex &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#5 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#3 &sb->s_type->i_mutex_key#9 rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#4 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &base->lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) batched_entropy_u8.lock crngs.lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond133 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#4 irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#3 
irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock 
irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 
0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 
(wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond43#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#4 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond88#3 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond44#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond44#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47#4 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &xt[i].mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &xt[i].mutex rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &xt[i].mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) 
irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock rcu_read_lock_bh rcu_read_lock 
quarantine_lock irq_context: 0 (wq_completion)bond48#4 irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &vma->vm_lock->lock &lruvec->lru_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond137 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond45#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq &(&bat_priv->orig_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond49#4 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond138 
(work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond139 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 irq_context: 0 (wq_completion)bond95#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#2 irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)wg-crypt-wg0 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)rcu_gp (work_completion)(&(&ssp->srcu_sup->work)->work) &ssp->srcu_sup->srcu_cb_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override &n->list_lock &c->lock irq_context: 0 (wq_completion)bond141 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond53#2 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond48#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &meta->lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_node_0 irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond129 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &cfs_rq->removed.lock irq_context: 0 rtnl_mutex &root->kernfs_rwsem &root->kernfs_iattr_rwsem &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond95#3 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#3 &rq->__lock irq_context: 0 (wq_completion)bond142 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override batched_entropy_u8.lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override kfence_freelist_lock irq_context: 0 (wq_completion)bond70#2 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond54#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond143 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &tb->tb6_lock rcu_read_lock rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond55#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) rcu_read_lock_bh fill_pool_map-wait-type-override pool_lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq &(&bond->mcast_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond98#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: softirq &(&group->avgs_work)->timer rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond53#2 &rq->__lock irq_context: 0 (wq_completion)bond53#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#2 &rq->__lock irq_context: 0 (wq_completion)bond70#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &mm->mmap_lock stock_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)wg-crypt-wg0#3 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond49#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond87#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond47#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond47#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56#2 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#2 irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) 
irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond46#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_read_lock &net->sctp.local_addr_lock &net->sctp.addr_wq_lock &____s->seqcount#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex &dev->tx_global_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&data->fib_event_work) &data->fib_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 irq_context: 0 (wq_completion)bond146 &rq->__lock irq_context: 0 (wq_completion)bond146 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &c->lock irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&in_dev->mr_ifc_timer) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#3 irq_context: 0 (wq_completion)bond100#3 &rq->__lock irq_context: 0 (wq_completion)bond100#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#4 irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 &rq->__lock irq_context: 0 (wq_completion)bond101#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex remove_cache_srcu pool_lock#2 irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &____s->seqcount#2 
irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&dsp_spl_tl) dsp_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond74 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#4 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &pcp->lock &zone->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond102#2 
irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond60 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond61 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)bond102#3 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond62 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#3 &rq->__lock irq_context: 0 (wq_completion)bond12#3 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond58#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&app->join_timer)#2 fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond13#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 irq_context: 0 (wq_completion)bond152 &rq->__lock irq_context: 0 (wq_completion)bond152 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond64 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond14#3 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 
0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 &rq->__lock irq_context: 0 (wq_completion)bond102#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond78#4 irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_node_0 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond62 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond66 irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#3 irq_context: 0 (wq_completion)bond79#3 &rq->__lock irq_context: 0 (wq_completion)bond79#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 &rq->__lock irq_context: 0 (wq_completion)bond155 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond17#6 irq_context: 0 (wq_completion)bond17#6 &rq->__lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq 
irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 irq_context: 0 (wq_completion)bond156 &rq->__lock irq_context: 0 (wq_completion)bond156 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond59 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#3 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond50#4 &rq->__lock irq_context: 0 (wq_completion)bond50#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#3 irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond18#6 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond56#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 
(wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143 &rq->__lock irq_context: 0 (wq_completion)bond143 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#2 &rq->__lock irq_context: 0 (wq_completion)bond73#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#6 irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond57#2 &rq->__lock irq_context: 0 (wq_completion)bond57#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 &rq->__lock irq_context: 0 (wq_completion)bond104 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond60 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond14#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond71#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond71#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond68#3 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond66 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond55#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
&ul->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#3 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &im->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond70#3 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#4 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond50#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex rcu_read_lock &stopper->lock irq_context: 0 rtnl_mutex rcu_read_lock &stop_pi_lock irq_context: 0 rtnl_mutex rcu_read_lock &stop_pi_lock &rq->__lock irq_context: 0 rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start rcu_node_0 irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override &cfs_rq->removed.lock irq_context: 0 rtnl_mutex rcu_read_lock 
&stop_pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond13#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond20#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim &rq->__lock &cfs_rq->removed.lock 
irq_context: 0 (wq_completion)bond71#3 irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond113 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 irq_context: 0 (wq_completion)bond21#4 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 &rq->__lock irq_context: 0 (wq_completion)bond15#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72#3 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 
(wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond63 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond19#6 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_node_0 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#3 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond115 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74#2 irq_context: 0 (wq_completion)bond74#2 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond68#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#2 &rq->__lock irq_context: 0 (wq_completion)bond94#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond57#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond57#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 &rq->__lock irq_context: 0 (wq_completion)bond151 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond24#3 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 &rq->__lock irq_context: 0 (wq_completion)bond111 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#2 irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond19#6 &rq->__lock irq_context: 0 (wq_completion)bond25#4 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&peer->timer_zero_key_material) rcu_read_lock_bh rcu_read_lock &pool->lock/1 &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg2 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) &handshake->lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) &handshake->lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &table->lock#2 irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) &peer->keypairs.keypair_update_lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg1 (work_completion)(&peer->clear_peer_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)events (work_completion)(&(&group->avgs_work)->work) &group->avgs_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#3 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond162 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond162 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond165 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#4 irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 rtnl_mutex fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119 irq_context: 0 
(wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#5 irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#5 irq_context: 0 (wq_completion)bond78#5 &rq->__lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#2 irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#5 irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg0#6 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &n->list_lock &c->lock irq_context: 0 (wq_completion)bond167 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 &rq->__lock irq_context: 0 (wq_completion)bond163 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond168 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond81#3 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#3 irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#3 irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &bgl->locks[i].lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg2#11 (work_completion)(&peer->transmit_handshake_work) &wg->static_identity.lock &handshake->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#2 &rq->__lock irq_context: 0 (wq_completion)bond75#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&in_dev->mr_ifc_timer) rcu_read_lock &tbl->lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &sb->s_type->i_mutex_key#8 bit_wait_table + i irq_context: softirq &ret->b_uptodate_lock bit_wait_table + i &p->pi_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 bit_wait_table + i irq_context: 0 (wq_completion)bond32#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 &rq->__lock irq_context: 0 (wq_completion)bond169 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 &rq->__lock irq_context: 0 (wq_completion)bond107#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond83#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond53#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#3 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond20#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#3 &rq->__lock irq_context: 0 (wq_completion)bond95#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond33#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond74#2 &rq->__lock irq_context: 0 (wq_completion)bond74#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond9#4 &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond31#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond34#3 irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &c->lock irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq &(&slave->notify_work)->timer rcu_read_lock &pool->lock/1 fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond95#2 &rq->__lock irq_context: 0 (wq_completion)bond95#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond28#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock &tbl->lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#2 &rq->__lock irq_context: 0 (wq_completion)bond110#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond174 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond156 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond163 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 
0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond174 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#3 irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&forw_packet_aggr->delayed_work)->work) &hard_iface->bat_iv.ogm_buff_mutex fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 rtnl_mutex &idev->mc_lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond86#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 irq_context: 0 (wq_completion)bond128#2 &rq->__lock irq_context: 0 (wq_completion)bond128#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 
0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx &rdev->bss_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events (work_completion)(&rdev->wiphy_work) &rdev->wiphy.mtx &wdev->mtx 
&rdev->bss_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond175 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond36#4 irq_context: 0 (wq_completion)bond36#4 &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond129#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond123#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond28#5 &rq->__lock irq_context: 0 (wq_completion)bond28#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond21#4 &rq->__lock irq_context: 0 (wq_completion)bond21#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 irq_context: 0 (wq_completion)bond176 &rq->__lock irq_context: 0 (wq_completion)bond176 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&ndev_work->work) devices_rwsem rcu_node_0 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#4 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 irq_context: 0 (wq_completion)bond124#2 &rq->__lock irq_context: 0 (wq_completion)bond124#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond89#4 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 &rq->__lock irq_context: 0 (wq_completion)bond89#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond123 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond87#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond79#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &cfs_rq->removed.lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock 
irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 &rq->__lock irq_context: 0 (wq_completion)bond12#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond17#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond176 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 &rq->__lock irq_context: 0 (wq_completion)bond110#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond179 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)events_unbound connector_reaper_work fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 
(wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 &rq->__lock irq_context: 0 (wq_completion)bond157 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 
(wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond175 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond12#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond12#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond181 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond93#4 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock quarantine_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock &tbl->lock irq_context: 0 cgroup_threadgroup_rwsem freezer_mutex &rcu_state.expedited_wq irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#3 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 
(work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154 &rq->__lock irq_context: 0 (wq_completion)bond154 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex uevent_sock_mutex fs_reclaim mmu_notifier_invalidate_range_start &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond183 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond64 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#4 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex cpu_hotplug_lock wq_pool_mutex rcu_node_0 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond184 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) 
&base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 
irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond22#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond75#3 &rq->__lock irq_context: 0 (wq_completion)bond75#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond9#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond16#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond92#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 &rq->__lock irq_context: 0 (wq_completion)bond127#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 &rq->__lock irq_context: 0 (wq_completion)bond175 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#5 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond179 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#3 &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond182 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond101#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond86#2 &rq->__lock irq_context: 0 (wq_completion)bond97#4 &rq->__lock irq_context: 0 (wq_completion)bond97#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond98#4 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex &pool->lock &p->pi_lock &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#3 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 irq_context: 0 (wq_completion)bond187 &rq->__lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond29#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond84#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond84#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond99#3 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#6 irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond176 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond75#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond23#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond184 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#2 irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &n->list_lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#2 &rq->__lock irq_context: 0 (wq_completion)bond122#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 &rq->__lock irq_context: 0 (wq_completion)bond115#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#2 &rq->__lock irq_context: 0 (wq_completion)bond136#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 
irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond41#6 irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond115 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond83#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 &rq->__lock irq_context: 0 (wq_completion)bond173 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#2 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond131#3 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond81#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond101#4 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond101#4 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#2 irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 &rq->__lock irq_context: 0 (wq_completion)bond132#3 irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond190 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond37#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188 &rq->__lock irq_context: 0 (wq_completion)bond188 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->mark_mutex rcu_node_0 irq_context: 0 (wq_completion)bond42#6 irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond154 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond154 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#6 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 
(wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond79#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond86#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 irq_context: 0 (wq_completion)bond145#2 &rq->__lock irq_context: 0 (wq_completion)bond145#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 &rq->__lock irq_context: 0 (wq_completion)bond185 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#5 irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 sk_lock-AF_PACKET &po->bind_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond192 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 irq_context: 0 (wq_completion)bond104#3 &rq->__lock irq_context: 0 (wq_completion)bond104#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#5 irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond187 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond187 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond76#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock 
&ndev->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130#3 &rq->__lock irq_context: 0 (wq_completion)bond130#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock 
irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond67#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &meta->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond83#2 &rq->__lock irq_context: 0 (wq_completion)bond83#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#2 &rq->__lock irq_context: 0 (wq_completion)bond90#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex (console_sem).lock irq_context: 0 vlan_ioctl_mutex console_lock console_srcu console_owner_lock irq_context: 0 vlan_ioctl_mutex console_lock console_srcu console_owner irq_context: 0 vlan_ioctl_mutex console_lock console_srcu console_owner &port_lock_key irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock 
irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 vlan_ioctl_mutex console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond95#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond95#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 &rq->__lock irq_context: 0 (wq_completion)bond191 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex console_owner_lock irq_context: 0 vlan_ioctl_mutex console_owner irq_context: 0 (wq_completion)bond194 irq_context: 0 (wq_completion)bond105#4 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 vlan_ioctl_mutex rcu_node_0 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond106#4 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond97#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond97#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond165 &rq->__lock irq_context: 0 (wq_completion)bond165 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167 &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 irq_context: 0 
(wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond196 irq_context: 0 (wq_completion)bond196 &rq->__lock irq_context: 0 (wq_completion)bond196 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#4 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock 
&c->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 
irq_context: 0 (wq_completion)bond119#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond21#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond196 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond73#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond52#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond52#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond131#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond197 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond92#4 &rq->__lock irq_context: 0 (wq_completion)bond92#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 
irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond112#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond24#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#4 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond199 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond199 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond98#4 &rq->__lock irq_context: 0 (wq_completion)bond98#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 
(wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 
(wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond10#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#2 irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#2 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond40#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock 
irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond56#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond15#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond34#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond43#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond169 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond122 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond42#6 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond36#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond36#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond146#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond152#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond200 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 &rq->__lock irq_context: 0 (wq_completion)bond133#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock 
irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#4 irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 
(wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond39#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond39#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 
(wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 
(wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond201 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &c->lock irq_context: 0 
(wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond193 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond193 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond201 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 
(wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond18#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond95#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond113 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond113 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock batched_entropy_u8.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem &ei->i_es_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 
0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock pool_lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock 
&n->list_lock &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond151 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex rcu_read_lock &pool->lock &p->pi_lock irq_context: 0 (wq_completion)bond202 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond15#4 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 &rq->__lock irq_context: 0 (wq_completion)bond155#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 
(work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond127#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond150 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond94#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond25#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond164 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond128#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond91#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond156#2 irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond66 &rq->__lock irq_context: 0 (wq_completion)bond66 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#3 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond100#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond120#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182 &rq->__lock irq_context: 0 (wq_completion)bond182 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) rcu_read_lock rcu_read_lock_bh quarantine_lock irq_context: 0 (wq_completion)bond116#4 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond105#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld &rq->__lock irq_context: 0 (wq_completion)mld &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#4 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)ipv6_addrconf (work_completion)(&(&ifa->dad_work)->work) rtnl_mutex rcu_read_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond206 irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond196 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond118#3 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#3 &rq->__lock irq_context: 0 (wq_completion)bond132#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond92#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond132#2 &rq->__lock irq_context: 0 (wq_completion)bond132#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond207 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond114#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond93#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond132#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#4 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond34#3 &rq->__lock irq_context: 0 (wq_completion)bond34#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex rcu_state.exp_mutex &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond99#2 &rq->__lock irq_context: 0 (wq_completion)bond99#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#3 &rq->__lock irq_context: 0 (wq_completion)bond69#3 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond208 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 
0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#4 irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond78#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#2 irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond209 irq_context: 0 (wq_completion)bond209 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) &cfs_rq->removed.lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 &type->i_mutex_dir_key#3 fill_pool_map-wait-type-override rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#2 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 &rq->__lock irq_context: 0 (wq_completion)bond157#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond179 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 &rq->__lock irq_context: 0 (wq_completion)bond198 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex quarantine_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock remove_cache_srcu &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond149#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond164#2 irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 
(wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &____s->seqcount#2 irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &____s->seqcount irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3/1 jbd2_handle &ei->i_data_sem remove_cache_srcu pool_lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&tn->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock 
irq_context: 0 (wq_completion)bond199 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#3 irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond107#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond76#3 &rq->__lock irq_context: 0 (wq_completion)bond76#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#3 &rq->__lock irq_context: 0 (wq_completion)bond126#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond41#6 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh 
rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond204 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond74#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond94#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu quarantine_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &c->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex remove_cache_srcu &n->list_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex remove_cache_srcu &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond147#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond80#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond26#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond27#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond117#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &meta_group_info[i]->alloc_sem &bgl->locks[i].lock irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle &bgl->locks[i].lock irq_context: 0 (wq_completion)bond124#3 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock stock_lock rcu_read_lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->orig_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond166#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex pool_lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex batched_entropy_u8.lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex kfence_freelist_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &meta->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond102#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond102#3 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond102#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rcu_state.expedited_wq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rcu_state.expedited_wq &p->pi_lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) &rcu_state.expedited_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond72#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#4 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#4 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 sb_writers#4 sb_internal jbd2_handle &sbi->s_orphan_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 irq_context: 0 (wq_completion)bond215 &rq->__lock irq_context: 0 (wq_completion)bond215 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#4 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#3 &rq->__lock irq_context: 0 (wq_completion)bond107#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond209 &rq->__lock irq_context: 0 (wq_completion)bond209 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond69#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond128#4 irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events (linkwatch_work).work rtnl_mutex pcpu_alloc_mutex.wait_lock irq_context: 0 (wq_completion)bond170#2 irq_context: 0 
(wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 &rq->__lock irq_context: 0 (wq_completion)bond159 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond85#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond162#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock 
irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 
(wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 &bat_priv->tt.changes_list_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 &bat_priv->tt.changes_list_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->tt.work)->work) key#16 &bat_priv->tt.changes_list_lock pool_lock#2 irq_context: 0 (wq_completion)bond171#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond218 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond130#4 irq_context: 0 (wq_completion)bond130#4 &rq->__lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond172#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 
(wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond219 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond181 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#2 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond106#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond65 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond63 &rq->__lock irq_context: 0 (wq_completion)bond63 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#3 &rq->__lock irq_context: 0 (wq_completion)bond103#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 &rq->__lock irq_context: 0 (wq_completion)bond119#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond172 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
fs_reclaim irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh 
rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond63 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock 
irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond108#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond77#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond133#3 
(work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond219 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond219 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#2 &rq->__lock irq_context: 0 (wq_completion)bond163#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond54#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#5 irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &base->lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 
(wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond61 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock 
irq_context: 0 (wq_completion)bond207 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond151#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond151#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 
(wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond121#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 vlan_ioctl_mutex &sem->wait_lock irq_context: 0 (wq_completion)bond205 &rq->__lock irq_context: 0 (wq_completion)bond205 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 
(wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond104 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
net_rwsem irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond218 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond106#5 irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 &rq->__lock irq_context: 0 (wq_completion)bond216 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#2 &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#3 irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond160 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock 
rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond122#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 
(wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond130#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sb_writers#4 &type->i_mutex_dir_key#3 jbd2_handle key#4 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events (work_completion)(&(&nsim_dev->trap_data->trap_report_dw)->work) &devlink->lock_key#6 &rq->__lock &cfs_rq->removed.lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock irq_context: 0 &mm->mmap_lock sb_pagefaults jbd2_handle &journal->j_state_lock &journal->j_wait_commit &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond176#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock 
irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond212 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 nf_sockopt_mutex rcu_read_lock &rq->__lock irq_context: 0 nf_sockopt_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock 
irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond223 irq_context: 0 (wq_completion)bond223 &rq->__lock irq_context: 0 (wq_completion)bond223 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#8 ima_extend_list_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#3 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond136#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond219 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#4 irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208 &rq->__lock irq_context: 0 (wq_completion)bond208 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 
0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond89#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond137#3 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond69#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond108#5 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond165 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond105#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 irq_context: 0 
(wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206 &rq->__lock irq_context: 0 (wq_completion)bond206 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF console_lock console_srcu console_owner_lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF console_lock console_srcu console_owner irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_CAIF console_lock console_srcu console_owner console_owner_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond109#5 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) 
irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond96#2 &rq->__lock irq_context: 0 (wq_completion)bond96#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 
(wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &____s->seqcount irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond38#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond38#4 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond38#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond227 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond175#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond205 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)events_long (work_completion)(&(&br->gc_work)->work) rcu_read_lock &br->hash_lock &c->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond175#2 &rq->__lock irq_context: 0 (wq_completion)bond175#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond213 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#4 &rq->__lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#5 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond228 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond228 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#4 &rq->__lock irq_context: 0 (wq_completion)bond141#3 irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond180#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 
(wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond136#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond99#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond99#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#3 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 key#27 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond195 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond195 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond173#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond126#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock quarantine_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond90#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229 irq_context: 
0 (wq_completion)bond229 &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond142#3 irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond142#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
nl_table_wait.lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond229 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond88#4 &rq->__lock irq_context: 0 (wq_completion)bond88#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond33#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond24#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond134#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond192 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond145#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 
irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond192 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#5 irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#5 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 _xmit_ETHER krc.lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &macvlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 krc.lock irq_context: 0 rtnl_mutex &idev->mc_lock &macvlan_netdev_addr_lock_key/1 &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond128#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond230 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 
0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond211 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock 
rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond178 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143#3 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start 
irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond183 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond184#2 irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 
0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond114#4 irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#4 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 
(wq_completion)bond183#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#2 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&hsr->announce_timer) rcu_read_lock &hsr->seqnr_lock quarantine_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 &rq->__lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond126#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond145#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond215 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond44#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond187#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 &rq->__lock irq_context: 0 (wq_completion)bond179#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond1 (work_completion)(&(&bond->alb_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock fill_pool_map-wait-type-override pool_lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &c->lock irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: softirq (&ndev->rs_timer) rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond109#4 &rq->__lock irq_context: 0 (wq_completion)bond109#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond234 
(work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond115#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 
(wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &____s->seqcount irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond227 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond82#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_ifc_work)->work) &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond149 &rq->__lock irq_context: 0 (wq_completion)bond149 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond43#5 &rq->__lock irq_context: 0 (wq_completion)bond43#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond148#3 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 
(wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond187#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 
(wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock rcu_read_lock rhashtable_bucket irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock nl_table_wait.lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond188#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond104#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond127#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#4 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond226 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond110#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond110#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq 
irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond147#3 
(work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond113#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond208 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond189#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond103#4 &rq->__lock irq_context: 0 (wq_completion)bond103#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock 
&rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond182#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond103#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond153#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond202 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond166#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond231 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond216 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond236 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond236 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)wg-kex-wg2#7 (work_completion)(&peer->transmit_handshake_work) &peer->endpoint_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond149#3 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond133#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 
(wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond223 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond11#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond115#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond225 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond214 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond135 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond110#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 
(wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond117#5 irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond117#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)mld (work_completion)(&(&idev->mc_dad_work)->work) &idev->mc_lock &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond191 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond19#6 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond228 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond237 irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond184#2 &rq->__lock irq_context: 0 (wq_completion)bond184#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) quarantine_lock irq_context: 0 (wq_completion)bond118#4 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond118#4 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 irq_context: 0 (wq_completion)bond191#2 &rq->__lock irq_context: 0 (wq_completion)bond191#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond191#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
&n->list_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond189 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 
irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 
irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#2 &rq->__lock irq_context: 0 (wq_completion)bond162#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock 
irq_context: 0 (wq_completion)bond186#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 &rq->__lock irq_context: 0 (wq_completion)bond109#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond121#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond116#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#5 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rcu_state.gp_wq &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#3 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond131#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &n->list_lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock &obj_hash[i].lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex free_vmap_area_lock pool_lock#2 irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)events pcpu_balance_work pcpu_alloc_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: hardirq hrtimer_bases.lock fill_pool_map-wait-type-override &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond152#3 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, 
cpu)->seq irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond193#2 irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond193#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond100#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 
(wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bat_events (work_completion)(&(&bat_priv->mcast.work)->work) &base->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond119#4 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond234 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 
(wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#3 &rq->__lock irq_context: 0 (wq_completion)bond123#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond132#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond194#2 irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond172#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond237 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond141#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond241 irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond72#2 &rq->__lock irq_context: 0 (wq_completion)bond72#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock 
&br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond161 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond190#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond224 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
&list->lock#2 irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond142#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond239 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond114#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond155#3 irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond148#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 &rq->__lock irq_context: 0 (wq_completion)bond233 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196#2 irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond241 &rq->__lock irq_context: 0 (wq_completion)bond241 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond217 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond227 &rq->__lock irq_context: 0 (wq_completion)bond227 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond184#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond182#2 &rq->__lock irq_context: 0 (wq_completion)bond182#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#5 irq_context: 0 (wq_completion)bond120#5 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg1#11 (work_completion)(&peer->transmit_handshake_work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 irq_context: 0 (wq_completion)bond156#3 &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond233 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond77#3 &rq->__lock irq_context: 0 (wq_completion)bond135#3 &rq->__lock irq_context: 0 (wq_completion)bond135#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)gid-cache-wq 
(work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)gid-cache-wq (work_completion)(&work->work) devices_rwsem rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197#2 irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond197#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)wg-kex-wg2#3 (work_completion)(&peer->transmit_handshake_work) &rq->__lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock irq_context: 0 (wq_completion)wg-kex-wg2#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &handshake->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond244 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond218 
(work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond189#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond243 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond240 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond121#4 irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bat_events 
(work_completion)(&(&forw_packet_aggr->delayed_work)->work) &pcp->lock &zone->lock &____s->seqcount irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond196#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond157#3 irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond157#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 irq_context: 0 (wq_completion)bond245 &rq->__lock irq_context: 0 (wq_completion)bond245 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 &rq->__lock irq_context: 0 (wq_completion)bond156#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond211 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242 &rq->__lock irq_context: 0 (wq_completion)bond242 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond195#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond180#2 &rq->__lock irq_context: 0 (wq_completion)bond180#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 &rq->__lock irq_context: 0 (wq_completion)bond96#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#4 irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond230 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond165#2 &rq->__lock irq_context: 0 (wq_completion)bond165#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond123#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &wg->device_update_lock rcu_state.exp_mutex &rnp->exp_wq[2] irq_context: 0 (wq_completion)bond158#3 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond129#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond156#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim 
mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond197 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock batched_entropy_u8.lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock kfence_freelist_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond32#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond143#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond32#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond221 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond246 irq_context: 0 (wq_completion)bond246 
(work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond113#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond144#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim &rq->__lock irq_context: 0 
(wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond246 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond71#3 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock pool_lock#2 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond171#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond123#4 irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond134#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#2 &rq->__lock irq_context: 0 (wq_completion)bond111#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond152#2 &rq->__lock irq_context: 0 (wq_completion)bond152#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#2 
(work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond239 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond147#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond173 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond200#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond159 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override &c->lock irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 &group->inotify_data.idr_lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond124#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond159#3 irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 irq_context: 0 (wq_completion)bond247 &rq->__lock irq_context: 0 (wq_completion)bond247 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 
(wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond35#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond126#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond124#4 irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond153#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond178#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond178#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 &group->mark_mutex lock &group->inotify_data.idr_lock &n->list_lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond201#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond108#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#3 irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 
(wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond193 &rq->__lock irq_context: 0 (wq_completion)bond193 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond137#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&bond->mcast_work)->work) &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond101#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond138#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond138#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) 
fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond242 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond165#2 (work_completion)(&(&slave->notify_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond66 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond248 irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond159#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond201#2 &rq->__lock irq_context: 0 (wq_completion)bond201#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond125#5 irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond129#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 fs_reclaim mmu_notifier_invalidate_range_start &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond125#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond82#3 &rq->__lock irq_context: 0 (wq_completion)bond125#3 (work_completion)(&(&slave->notify_work)->work) 
&rq->__lock irq_context: 0 (wq_completion)bond220 &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond245 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#2 irq_context: 0 (wq_completion)bond202#2 &rq->__lock irq_context: 0 (wq_completion)bond202#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond112#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond152#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond118#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond161#3 irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond150#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond116#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond139#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond139#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond149#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond209 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond161#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond203#2 irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond156#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond185#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond203#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 
(wq_completion)bond116#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount#2 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &____s->seqcount irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 
0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &rq->__lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond70#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond240 &rq->__lock irq_context: 0 (wq_completion)bond240 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond123#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250 irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond194#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond194#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond238 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &f->f_pos_lock &type->i_mutex_dir_key#3 sb_writers#4 jbd2_handle rcu_node_0 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&dir->lock#2 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond222 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond154#3 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond204#2 irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)wg-kex-wg0#4 (work_completion)(&({ do { const void *__vpp_verify = (typeof((worker) + 0))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((worker))) *)((worker)))); (typeof((typeof(*((worker))) *)((worker)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->work) &peer->endpoint_lock rcu_read_lock_bh &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock 
&per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond163#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#5 irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond170#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond154#2 &rq->__lock irq_context: 0 (wq_completion)bond154#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond251 irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond251 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond205#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem 
irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond108#2 (work_completion)(&(&bond->mcast_work)->work) 
&p->pi_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond164#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &cfs_rq->removed.lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond205#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond125#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 
(wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond124#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond88#4 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond126#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond127#5 irq_context: 0 (wq_completion)bond127#5 &rq->__lock irq_context: 0 (wq_completion)bond127#5 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond160#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond158#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond248 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &child->perf_event_mutex &cfs_rq->removed.lock irq_context: 0 &child->perf_event_mutex &obj_hash[i].lock irq_context: 0 &child->perf_event_mutex pool_lock#2 irq_context: 0 (wq_completion)bond158#2 
(work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond151#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond247 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond252 irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond127#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206#2 irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond249 &rq->__lock irq_context: 0 (wq_completion)bond249 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) 
&base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond165#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond128#5 irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond232 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond181#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond253 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond203 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond245 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond114#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond245 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 
0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond241 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond140#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock 
&obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond106#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 
(wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex 
&idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond109#5 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 quarantine_lock irq_context: 0 (wq_completion)bond100#4 &rq->__lock irq_context: 0 (wq_completion)bond100#4 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond142#3 &rq->__lock irq_context: 0 (wq_completion)bond142#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207#2 irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond135#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond231 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock 
irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond146#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond177#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 
(wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond207#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond121#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond177#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock rcu_node_0 irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6/1 fill_pool_map-wait-type-override rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond118#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 sk_lock-AF_INET6 rcu_read_lock batched_entropy_u32.lock crngs.lock irq_context: 0 (wq_completion)bond129#5 irq_context: 0 (wq_completion)bond129#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond129#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#5 
(work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond129#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond129#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond129#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond254 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond162#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond133#3 &rq->__lock irq_context: 0 (wq_completion)bond133#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond208#2 irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond208#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond252 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) 
rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond244 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond220 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) 
rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond179#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond167#3 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond167#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond232 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock 
irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#10 irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond198#2 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 
(wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock 
rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond96#4 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond248 &rq->__lock irq_context: 0 (wq_completion)bond248 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond192#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#5 irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond111#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond130#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond107#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond217 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 &sb->s_type->i_mutex_key#10 sk_lock-AF_INET6 &tcp_hashinfo.bhash[i].lock per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu) irq_context: 0 (wq_completion)bond255 irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond125#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond255 
(work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond255 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond122#4 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond155#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond209#2 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond168#3 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond200#2 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond235 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond235 
(work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond249 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock &ul->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_lock irq_context: 0 
(wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock nl_table_wait.lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock lock#8 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock rcu_read_lock id_table_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock krc.lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock &obj_hash[i].lock pool_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &tbl->lock fill_pool_map-wait-type-override &____s->seqcount irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond204#2 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock irq_context: 0 (wq_completion)bond130#3 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->list_lock &c->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &dir->lock#2 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &n->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock &____s->seqcount#9 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &br->multicast_lock &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_read_lock rcu_read_lock_bh rcu_read_lock rcu_read_lock_bh rcu_read_lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock rcu_read_lock rcu_node_0 irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock irq_context: 0 (wq_completion)bond130#3 (work_completion)(&(&bond->mcast_work)->work) &p->pi_lock &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond167#2 &rq->__lock irq_context: 0 (wq_completion)bond167#2 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond131#5 irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond131#5 
(work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond131#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond202#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond256 irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond167#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond256 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond124#4 (work_completion)(&(&bond->mcast_work)->work) fill_pool_map-wait-type-override pool_lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond250 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond206#2 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond169#3 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond169#3 
(work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond115#3 &rq->__lock irq_context: 0 (wq_completion)bond115#3 &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond210 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond120#5 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond119#4 (work_completion)(&(&bond->mcast_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257 irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &base->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &rq->__lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&slave->notify_work)->work) &rq->__lock &per_cpu_ptr(group->pcpu, cpu)->seq irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &obj_hash[i].lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &base->lock irq_context: 0 (wq_completion)bond257 (work_completion)(&(&bond->mcast_work)->work) &base->lock &obj_hash[i].lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) rtnl_mutex.wait_lock irq_context: 0 (wq_completion)bond128#5 (work_completion)(&(&slave->notify_work)->work) &p->pi_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex fs_reclaim mmu_notifier_invalidate_range_start irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock irq_context: 0 (wq_completion)bond176#2 
(work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &n->list_lock &c->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &bond->stats_lock/1 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex rcu_read_lock &ndev->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &obj_hash[i].lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex nl_table_wait.lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex net_rwsem &list->lock#2 irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &tn->lock irq_context: 0 (wq_completion)bond176#2 (work_completion)(&(&bond->mcast_work)->work) rtnl_mutex &idev->mc_lock irq_context: 0 (wq_completion)bond176#2 (wo